/* Copyright © 2017-2018 Apple Inc. All rights reserved.
 *
 * lf_hfs_vfsutils.c
 * livefiles_hfs
 *
 * Created by Or Haimovich on 18/3/18.
 */

#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <mach/mach.h>
#include <sys/disk.h>

#include "lf_hfs.h"
#include "lf_hfs_locks.h"
#include "lf_hfs_format.h"
#include "lf_hfs_endian.h"
#include "lf_hfs_logger.h"
#include "lf_hfs_mount.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_raw_read_write.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_vfsops.h"
#include "lf_hfs_file_mgr_internal.h"
#include "lf_hfs_btrees_internal.h"
#include "lf_hfs_file_extent_mapping.h"
#include "lf_hfs_sbunicode.h"
#include "lf_hfs_xattr.h"
#include "lf_hfs_unicode_wrappers.h"
#include "lf_hfs_link.h"
#include "lf_hfs_btree.h"
#include "lf_hfs_journal.h"

static int hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_args);
u_int32_t GetFileInfo(ExtendedVCB *vcb, const char *name,
                      struct cat_attr *fattr, struct cat_fork *forkinfo);

//*******************************************************************************
// Names of the HFS+ metadata B-trees and special files
//*******************************************************************************
unsigned char hfs_catname[] = "Catalog B-tree";
unsigned char hfs_extname[] = "Extents B-tree";
unsigned char hfs_vbmname[] = "Volume Bitmap";
unsigned char hfs_attrname[] = "Attribute B-tree";
unsigned char hfs_startupname[] = "Startup File";

//*******************************************************************************
//
// Sanity check Volume Header Block:
//    Input argument *vhp is a pointer to a HFSPlusVolumeHeader block that has
//    not been endian-swapped and represents the on-disk contents of this sector.
//    This routine will not change the endianness of the vhp block.
//
//*******************************************************************************
int hfs_ValidateHFSPlusVolumeHeader(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp)
{
    u_int16_t signature = SWAP_BE16(vhp->signature);
    u_int16_t hfs_version = SWAP_BE16(vhp->version);

    if (signature == kHFSPlusSigWord)
    {
        if (hfs_version != kHFSPlusVersion)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_ValidateHFSPlusVolumeHeader: invalid HFS+ version: %x\n", hfs_version);
            return (EINVAL);
        }
    }
    else if (signature == kHFSXSigWord)
    {
        if (hfs_version != kHFSXVersion)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_ValidateHFSPlusVolumeHeader: invalid HFSX version: %x\n", hfs_version);
            return (EINVAL);
        }
    }
    else
    {
        /* Removed printf for invalid HFS+ signature because it gives
         * a false error for a UFS root volume.
         */
        LFHFS_LOG(LEVEL_DEBUG, "hfs_ValidateHFSPlusVolumeHeader: unknown volume signature: %x\n", signature);
        return (EINVAL);
    }

    /* Block size must be at least 512 and a power of 2 */
    u_int32_t blockSize = SWAP_BE32(vhp->blockSize);
    if (blockSize < 512 || !powerof2(blockSize))
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_ValidateHFSPlusVolumeHeader: invalid blocksize (%d)\n", blockSize);
        return (EINVAL);
    }

    if (blockSize < hfsmp->hfs_logical_block_size)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_ValidateHFSPlusVolumeHeader: invalid physical blocksize (%d), hfs_logical_blocksize (%d)\n",
                  blockSize, hfsmp->hfs_logical_block_size);
        return (EINVAL);
    }
    return 0;
}

//*******************************************************************************
// Routine: hfs_CollectBtreeStats
//
//
//*******************************************************************************

int hfs_CollectBtreeStats(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, void *args)
{
    int retval = 0;
    register ExtendedVCB *vcb = HFSTOVCB(hfsmp);
    u_int32_t blockSize = SWAP_BE32(vhp->blockSize);

    /*
     * Pull in the volume UUID while we are still single-threaded.
     * This brings the volume UUID into the cached one dangling off of the HFSMP.
     * Otherwise it would have to be computed on first access.
     */
    uuid_t throwaway;
    hfs_getvoluuid (hfsmp, throwaway);

    /*
     * We now always initiate a full bitmap scan even if the volume is read-only because this is
     * our only shot to do I/Os of dramatically different sizes than what the buffer cache ordinarily
     * expects. TRIMs will not be delivered to the underlying media if the volume is not
     * read-write though.
     */
    hfsmp->scan_var = 0;

    hfs_scan_blocks(hfsmp);

    if (hfsmp->jnl && (hfsmp->hfs_flags & HFS_READ_ONLY) == 0)
    {
        hfs_flushvolumeheader(hfsmp, 0);
    }

    /* kHFSHasFolderCount is only supported/updated on HFSX volumes */
    if ((hfsmp->hfs_flags & HFS_X) != 0)
    {
        hfsmp->hfs_flags |= HFS_FOLDERCOUNT;
    }

    // Check if we need to do late journal initialization. This only
    // happens if a previous version of Mac OS X (or 9) touched the disk.
    // In that case hfs_late_journal_init() will go re-locate the journal
    // and journal_info_block files and validate that they're still kosher.
    if ( (vcb->vcbAtrb & kHFSVolumeJournaledMask) &&
         (SWAP_BE32(vhp->lastMountedVersion) != kHFSJMountVersion) &&
         (hfsmp->jnl == NULL))
    {
        retval = hfs_late_journal_init(hfsmp, vhp, args);
        if (retval != 0)
        {
            if (retval == EROFS)
            {
                // EROFS is a special error code that means the volume has an external
                // journal which we couldn't find. In that case we do not want to
                // rewrite the volume header - we'll just refuse to mount the volume.
                LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_late_journal_init returned (%d), maybe an external jnl?\n", retval);
                retval = EINVAL;
                goto ErrorExit;
            }

            hfsmp->jnl = NULL;

            // If the journal failed to open, then set the lastMountedVersion
            // to be "FSK!" which fsck_hfs will see and force a fsck instead
            // of just bailing out because the volume is journaled.
            if (!(hfsmp->hfs_flags & HFS_READ_ONLY))
            {
                hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;

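                // The primary volume header lives at a fixed 1024-byte offset
                // (HFS_PRI_SECTOR) from the start of the volume; compute its
                // block number, accounting for any embedded-volume offset, so
                // it can be re-read and stamped below.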
                uint64_t mdb_offset = (uint64_t)((embeddedOffset / blockSize) + HFS_PRI_SECTOR(blockSize));

                void *pvBuffer = hfs_malloc(hfsmp->hfs_physical_block_size);
                if (pvBuffer == NULL)
                {
                    retval = ENOMEM;
                    goto ErrorExit;
                }

                retval = raw_readwrite_read_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvBuffer, hfsmp->hfs_physical_block_size, NULL, NULL);
                if (retval)
                {
                    LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: JNL header raw_readwrite_read_mount failed with %d\n", retval);
                    hfs_free(pvBuffer);
                    goto ErrorExit;
                }

                HFSPlusVolumeHeader *jvhp = (HFSPlusVolumeHeader *)(pvBuffer + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));

                if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord)
                {
                    LFHFS_LOG(LEVEL_ERROR, "hfs_MountHFSPlusVolume: Journal replay fail. Writing lastMountVersion as FSK!\n");
                    jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);

                    retval = raw_readwrite_write_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvBuffer, hfsmp->hfs_physical_block_size, NULL, NULL);
                    hfs_free(pvBuffer);
                    if (retval)
                    {
                        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: JNL header raw_readwrite_write_mount failed with %d\n", retval);
                        goto ErrorExit;
                    }
                }
                else
                {
                    hfs_free(pvBuffer);
                }
            }

            LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_late_journal_init returned (%d)\n", retval);
            retval = EINVAL;
            goto ErrorExit;
        }
        else if (hfsmp->jnl)
        {
            hfsmp->hfs_mp->mnt_flag |= MNT_JOURNALED;
        }
    }
    else if (hfsmp->jnl || ((vcb->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
    {
        struct cat_attr jinfo_attr, jnl_attr;
        if (hfsmp->hfs_flags & HFS_READ_ONLY)
        {
            vcb->vcbAtrb &= ~kHFSVolumeJournaledMask;
        }

        // If we're here we need to fill in the file IDs for the
        // journal and journal_info_block.
        hfsmp->hfs_jnlinfoblkid = GetFileInfo(vcb, ".journal_info_block", &jinfo_attr, NULL);
        hfsmp->hfs_jnlfileid    = GetFileInfo(vcb, ".journal", &jnl_attr, NULL);
        if (hfsmp->hfs_jnlinfoblkid == 0 || hfsmp->hfs_jnlfileid == 0)
        {
            LFHFS_LOG(LEVEL_DEFAULT, "hfs_MountHFSPlusVolume: danger! couldn't find the file-id's for the journal or journal_info_block\n");
            LFHFS_LOG(LEVEL_DEFAULT, "hfs_MountHFSPlusVolume: jnlfileid %llu, jnlinfoblkid %llu\n", hfsmp->hfs_jnlfileid, hfsmp->hfs_jnlinfoblkid);
        }

        if (hfsmp->hfs_flags & HFS_READ_ONLY)
        {
            vcb->vcbAtrb |= kHFSVolumeJournaledMask;
        }

        if (hfsmp->jnl == NULL)
        {
            hfsmp->hfs_mp->mnt_flag &= ~(u_int64_t)((unsigned int)MNT_JOURNALED);
        }
    }

    if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) )    // if the disk is not write protected
    {
        MarkVCBDirty( vcb );    // mark VCB dirty so it will be written
    }

    /*
     * Distinguish 3 potential cases involving content protection:
     * 1. mount point bit set; vcbAtrb does not support it. Fail.
     * 2. mount point bit set; vcbAtrb supports it. We're good.
     * 3. mount point bit not set; vcbAtrb supports it. Turn the bit on, then we're good.
     */
    if (hfsmp->hfs_mp->mnt_flag & MNT_CPROTECT)
    {
        /* Does the mount point support it ? */
        if ((vcb->vcbAtrb & kHFSContentProtectionMask) == 0)
        {
            /* Case 1 above */
            retval = EINVAL;
            goto ErrorExit;
        }
    }
    else
    {
        /* Not requested in the mount point. Is it in the FS? */
        if (vcb->vcbAtrb & kHFSContentProtectionMask)
        {
            /* Case 3 above */
            hfsmp->hfs_mp->mnt_flag |= MNT_CPROTECT;
        }
    }

#if LF_HFS_CHECK_UNMAPPED // TBD:
    /*
     * Establish a metadata allocation zone.
     */
    hfs_metadatazone_init(hfsmp, false);

    /*
     * Make any metadata zone adjustments.
     */
    if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
    {
        /* Keep the roving allocator out of the metadata zone. */
        if (vcb->nextAllocation >= hfsmp->hfs_metazone_start &&
            vcb->nextAllocation <= hfsmp->hfs_metazone_end)
        {
            HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_end + 1);
        }
    }
    else
#endif
    {
        if (vcb->nextAllocation <= 1)
        {
            vcb->nextAllocation = hfsmp->hfs_min_alloc_start;
        }
    }
    vcb->sparseAllocation = hfsmp->hfs_min_alloc_start;

    /* Setup private/hidden directories for hardlinks. */
    hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
    hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

    hfs_remove_orphans(hfsmp);

    /* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
    retval = hfs_erase_unused_nodes(hfsmp);
    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_erase_unused_nodes returned (%d) for %s \n", retval, hfsmp->vcbVN);
        goto ErrorExit;
    }

    /* Enable extent-based extended attributes by default */
    hfsmp->hfs_flags |= HFS_XATTR_EXTENTS;

    return (0);

ErrorExit:
    /*
     * A fatal error occurred and the volume cannot be mounted, so
     * release any resources that we acquired...
     */
    hfsUnmount(hfsmp);

    LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: encountered error (%d)\n", retval);

    return (retval);
}


int hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, u_int64_t disksize, bool bFailForDirty)
{
    int retval = 0;

    register ExtendedVCB *vcb;
    struct cat_desc cndesc;
    struct cat_attr cnattr;
    struct cat_fork cfork;
    u_int32_t blockSize;
    uint64_t spare_sectors;
    int newvnode_flags = 0;
    BTreeInfoRec btinfo;

    u_int16_t signature = SWAP_BE16(vhp->signature);

    retval = hfs_ValidateHFSPlusVolumeHeader(hfsmp, vhp);
    if (retval)
        return retval;

    if (signature == kHFSXSigWord)
    {
        /* The in-memory signature is always 'H+'. */
        signature = kHFSPlusSigWord;
        hfsmp->hfs_flags |= HFS_X;
    }

    blockSize = SWAP_BE32(vhp->blockSize);
    /* Don't mount a writable volume if it's dirty; it must be cleaned by fsck_hfs. */
    if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0 &&
        hfsmp->jnl == NULL &&
        (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) == 0 &&
        bFailForDirty)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: cannot mount dirty non-journaled volumes\n");
        return (EINVAL);
    }

    /* Make sure we can live with the physical block size. */
    if ((disksize & (hfsmp->hfs_logical_block_size - 1)) ||
        (embeddedOffset & (hfsmp->hfs_logical_block_size - 1)))
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_logical_blocksize (%d) \n", hfsmp->hfs_logical_block_size);
        return (ENXIO);
    }

    /*
     * If the allocation block size is less than the physical block size,
     * the same data could be cached in two places and lead to corruption.
     *
     * HFS Plus reserves one allocation block for the Volume Header.
     * If the physical size is larger, then when we read the volume header,
     * we will also end up reading in the next allocation block(s).
     * If those other allocation block(s) is/are modified, and then the volume
     * header is modified, the write of the volume header's buffer will write
     * out the old contents of the other allocation blocks.
     *
     * We assume that the physical block size is the same as the logical block size.
     * The physical block size value is used to round down the offsets for
     * reading and writing the primary and alternate volume headers.
     *
     * The same logic to ensure a good hfs_physical_block_size is also in
     * hfs_mountfs, so that hfs_mountfs, hfs_MountHFSPlusVolume and
     * later code do their I/Os using the same block size.
     */
    if (blockSize < hfsmp->hfs_physical_block_size)
    {
        hfsmp->hfs_physical_block_size = hfsmp->hfs_logical_block_size;
        hfsmp->hfs_log_per_phys = 1;
    }

    /*
     * The VolumeHeader seems OK: transfer info from it into VCB.
     * Note - the VCB starts out clear (all zeros).
     */
    vcb = HFSTOVCB(hfsmp);

    vcb->vcbSigWord    = signature;
    vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock);
    vcb->vcbLsMod      = to_bsd_time(SWAP_BE32(vhp->modifyDate));
    vcb->vcbAtrb       = SWAP_BE32(vhp->attributes);
    vcb->vcbClpSiz     = SWAP_BE32(vhp->rsrcClumpSize);
    vcb->vcbNxtCNID    = SWAP_BE32(vhp->nextCatalogID);
    vcb->vcbVolBkUp    = to_bsd_time(SWAP_BE32(vhp->backupDate));
    vcb->vcbWrCnt      = SWAP_BE32(vhp->writeCount);
    vcb->vcbFilCnt     = SWAP_BE32(vhp->fileCount);
    vcb->vcbDirCnt     = SWAP_BE32(vhp->folderCount);

    /* copy 32 bytes of Finder info */
    bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));

    vcb->vcbAlBlSt = 0;    /* HFS+ allocation blocks start at first block of volume */
    if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)
    {
        vcb->vcbWrCnt++;    /* compensate for write of Volume Header on last flush */
    }

    /* Now fill in the Extended VCB info */
    vcb->nextAllocation  = SWAP_BE32(vhp->nextAllocation);
    vcb->totalBlocks     = SWAP_BE32(vhp->totalBlocks);
    vcb->allocLimit      = vcb->totalBlocks;
    vcb->freeBlocks      = SWAP_BE32(vhp->freeBlocks);
    vcb->blockSize       = blockSize;
    vcb->encodingsBitmap = SWAP_BE64(vhp->encodingsBitmap);
    vcb->localCreateDate = SWAP_BE32(vhp->createDate);

    vcb->hfsPlusIOPosOffset = (uint32_t) embeddedOffset;

    /* Default to no free block reserve */
    vcb->reserveBlocks = 0;

    /*
     * Update the logical block size in the mount struct
     * (currently set up from the wrapper MDB) using the
     * new blocksize value:
     */
    hfsmp->hfs_logBlockSize = BestBlockSizeFit(vcb->blockSize, MAXBSIZE, hfsmp->hfs_logical_block_size);
    vcb->vcbVBMIOSize = MIN(vcb->blockSize, MAXPHYSIO);

    /*
     * Validate and initialize the location of the alternate volume header.
     *
     * Note that there may be spare sectors beyond the end of the filesystem that still
     * belong to our partition.
     */
    spare_sectors = hfsmp->hfs_logical_block_count - (((uint64_t)vcb->totalBlocks * blockSize) / hfsmp->hfs_logical_block_size);

    /*
     * Differentiate between "innocuous" spare sectors and the more unusual
     * degenerate case:
     *
     * *** Innocuous spare sectors exist if:
     *
     * A) the number of bytes assigned to the partition (by multiplying logical
     *    block size * logical block count) is greater than the filesystem size
     *    (by multiplying allocation block count and allocation block size)
     *
     * and
     *
     * B) the remainder is less than the size of a full allocation block's worth of bytes.
     *
     * This handles the normal case where there may be a few extra sectors, but the two
     * are fundamentally in sync.
     *
     * *** Degenerate spare sectors exist if:
     * A) The number of bytes assigned to the partition (by multiplying logical
     *    block size * logical block count) is greater than the filesystem size
     *    (by multiplying allocation block count and block size).
     *
     * and
     *
     * B) the remainder is greater than a full allocation block's worth of bytes.
     * In this case, a smaller file system exists in a larger partition.
     * This can happen in various ways, including when the volume is resized but the
     * partition has yet to be resized. Under this condition, we have to assume that
     * partition management software may resize the partition to match
     * the file system size in the future. Therefore we should update the
     * alternate volume header at two locations on the disk:
     *   a. 1024 bytes before the end of the partition
     *   b. 1024 bytes before the end of the file system
     */

    if (spare_sectors > (uint64_t)(blockSize / hfsmp->hfs_logical_block_size))
    {
        /*
         * Handle the degenerate case above: FS < partition size.
         * AVH located at 1024 bytes from the end of the partition.
         */
        hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, hfsmp->hfs_logical_block_count);

        /* AVH located at 1024 bytes from the end of the filesystem */
        hfsmp->hfs_fs_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, (((uint64_t)vcb->totalBlocks * blockSize) / hfsmp->hfs_logical_block_size));
    }
    else
    {
        /* Innocuous spare sectors; partition & FS notions are in sync */
        hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, hfsmp->hfs_logical_block_count);

        hfsmp->hfs_fs_avh_sector = hfsmp->hfs_partition_avh_sector;
    }

    LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: partition_avh_sector=%qu, fs_avh_sector=%qu\n", hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);

    bzero(&cndesc, sizeof(cndesc));
    cndesc.cd_parentcnid = kHFSRootParentID;
    cndesc.cd_flags |= CD_ISMETA;
    bzero(&cnattr, sizeof(cnattr));
    cnattr.ca_linkcount = 1;
    cnattr.ca_mode = S_IFREG;

    /*
     * Set up Extents B-tree vnode
     */
    cndesc.cd_nameptr = hfs_extname;
    cndesc.cd_namelen = strlen((char *)hfs_extname);
    cndesc.cd_cnid = cnattr.ca_fileid = kHFSExtentsFileID;

    cfork.cf_size     = SWAP_BE64 (vhp->extentsFile.logicalSize);
    cfork.cf_new_size = 0;
    cfork.cf_clump    = SWAP_BE32 (vhp->extentsFile.clumpSize);
    cfork.cf_blocks   = SWAP_BE32 (vhp->extentsFile.totalBlocks);
    cfork.cf_vblocks  = 0;
    cnattr.ca_blocks  = cfork.cf_blocks;

    for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++)
    {
        cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->extentsFile.extents[iExtentCounter].startBlock);
        cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->extentsFile.extents[iExtentCounter].blockCount);
    }

    retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_extents_vp, &newvnode_flags);
    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting extentoverflow BT\n", retval);
        goto ErrorExit;
    }

    hfsmp->hfs_extents_cp = VTOC(hfsmp->hfs_extents_vp);
    retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_extents_vp), (KeyCompareProcPtr) CompareExtentKeysPlus));

    hfs_unlock(hfsmp->hfs_extents_cp);

    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting extentoverflow BT\n", retval);
        goto ErrorExit;
    }

    /*
     * Set up Catalog B-tree vnode
     */
    cndesc.cd_nameptr = hfs_catname;
    cndesc.cd_namelen = strlen((char *)hfs_catname);
    cndesc.cd_cnid = cnattr.ca_fileid = kHFSCatalogFileID;

    cfork.cf_size    = SWAP_BE64 (vhp->catalogFile.logicalSize);
    cfork.cf_clump   = SWAP_BE32 (vhp->catalogFile.clumpSize);
    cfork.cf_blocks  = SWAP_BE32 (vhp->catalogFile.totalBlocks);
    cfork.cf_vblocks = 0;
    cnattr.ca_blocks = cfork.cf_blocks;

    for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++)
    {
        cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->catalogFile.extents[iExtentCounter].startBlock);
        cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->catalogFile.extents[iExtentCounter].blockCount);
    }

    retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_catalog_vp, &newvnode_flags);
    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting catalog BT\n", retval);
        goto ErrorExit;
    }
    hfsmp->hfs_catalog_cp = VTOC(hfsmp->hfs_catalog_vp);
    retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_catalog_vp), (KeyCompareProcPtr) CompareExtendedCatalogKeys));

    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting catalog BT\n", retval);
        hfs_unlock(hfsmp->hfs_catalog_cp);
        goto ErrorExit;
    }

    if ((hfsmp->hfs_flags & HFS_X) &&
        BTGetInformation(VTOF(hfsmp->hfs_catalog_vp), 0, &btinfo) == 0)
    {
        if (btinfo.keyCompareType == kHFSBinaryCompare)
        {
            hfsmp->hfs_flags |= HFS_CASE_SENSITIVE;
            /* Install a case-sensitive key compare */
            (void) BTOpenPath(VTOF(hfsmp->hfs_catalog_vp), (KeyCompareProcPtr)cat_binarykeycompare);
        }
    }

    hfs_unlock(hfsmp->hfs_catalog_cp);

    /*
     * Set up Allocation file vnode
     */
    cndesc.cd_nameptr = hfs_vbmname;
    cndesc.cd_namelen = strlen((char *)hfs_vbmname);
    cndesc.cd_cnid = cnattr.ca_fileid = kHFSAllocationFileID;

    cfork.cf_size    = SWAP_BE64 (vhp->allocationFile.logicalSize);
    cfork.cf_clump   = SWAP_BE32 (vhp->allocationFile.clumpSize);
    cfork.cf_blocks  = SWAP_BE32 (vhp->allocationFile.totalBlocks);
    cfork.cf_vblocks = 0;
    cnattr.ca_blocks = cfork.cf_blocks;

    for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++)
    {
        cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->allocationFile.extents[iExtentCounter].startBlock);
        cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->allocationFile.extents[iExtentCounter].blockCount);
    }

    retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_allocation_vp, &newvnode_flags);
    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting bitmap\n", retval);
        goto ErrorExit;
    }
    hfsmp->hfs_allocation_cp = VTOC(hfsmp->hfs_allocation_vp);
    hfs_unlock(hfsmp->hfs_allocation_cp);

    /*
     * Set up Attribute B-tree vnode
     */
    if (vhp->attributesFile.totalBlocks != 0) {
        cndesc.cd_nameptr = hfs_attrname;
        cndesc.cd_namelen = strlen((char *)hfs_attrname);
        cndesc.cd_cnid = cnattr.ca_fileid = kHFSAttributesFileID;

        cfork.cf_size    = SWAP_BE64 (vhp->attributesFile.logicalSize);
        cfork.cf_clump   = SWAP_BE32 (vhp->attributesFile.clumpSize);
        cfork.cf_blocks  = SWAP_BE32 (vhp->attributesFile.totalBlocks);
        cfork.cf_vblocks = 0;
        cnattr.ca_blocks = cfork.cf_blocks;

        for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++)
        {
            cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->attributesFile.extents[iExtentCounter].startBlock);
            cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->attributesFile.extents[iExtentCounter].blockCount);
        }
        retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_attribute_vp, &newvnode_flags);
        if (retval)
        {
            LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting EA BT\n", retval);
            goto ErrorExit;
        }
        hfsmp->hfs_attribute_cp = VTOC(hfsmp->hfs_attribute_vp);

        retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_attribute_vp), (KeyCompareProcPtr) hfs_attrkeycompare));
        hfs_unlock(hfsmp->hfs_attribute_cp);
        if (retval)
        {
            LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting EA BT\n", retval);
            goto ErrorExit;
        }

        /* Initialize a vnode for the virtual attribute data file that spans the
         * entire file system space, used for performing I/O to the attribute B-tree.
         * We hold an iocount on the attrdata vnode for the entire duration
         * of the mount (similar to the B-tree vnodes).
         */
        retval = init_attrdata_vnode(hfsmp);
        if (retval)
        {
            LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: init_attrdata_vnode returned (%d) for virtual EA file\n", retval);
            goto ErrorExit;
        }
    }

    /*
     * Set up Startup file vnode
     */
    if (vhp->startupFile.totalBlocks != 0) {
        cndesc.cd_nameptr = hfs_startupname;
        cndesc.cd_namelen = strlen((char *)hfs_startupname);
        cndesc.cd_cnid = cnattr.ca_fileid = kHFSStartupFileID;

        cfork.cf_size    = SWAP_BE64 (vhp->startupFile.logicalSize);
        cfork.cf_clump   = SWAP_BE32 (vhp->startupFile.clumpSize);
        cfork.cf_blocks  = SWAP_BE32 (vhp->startupFile.totalBlocks);
        cfork.cf_vblocks = 0;
        cnattr.ca_blocks = cfork.cf_blocks;
        for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++)
        {
            cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->startupFile.extents[iExtentCounter].startBlock);
            cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->startupFile.extents[iExtentCounter].blockCount);
        }

        retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_startup_vp, &newvnode_flags);
        if (retval)
        {
            LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting startup file\n", retval);
            goto ErrorExit;
        }
        hfsmp->hfs_startup_cp = VTOC(hfsmp->hfs_startup_vp);
        hfs_unlock(hfsmp->hfs_startup_cp);
    }

    /*
     * Pick up volume name and create date.
     *
     * Acquiring the volume name should not manipulate the bitmap, only the catalog
     * B-tree and possibly the extents overflow B-tree.
     */
    retval = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, &cnattr, NULL);
    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: cat_idlookup returned (%d) getting rootfolder \n", retval);
        goto ErrorExit;
    }
    vcb->hfs_itime = cnattr.ca_itime;
    vcb->volumeNameEncodingHint = cndesc.cd_encoding;
    bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
    cat_releasedesc(&cndesc);

    return (0);

ErrorExit:
    /*
     * A fatal error occurred and the volume cannot be mounted, so
     * release any resources that we acquired...
     */
    hfsUnmount(hfsmp);

    LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: encountered error (%d)\n", retval);

    return (retval);
}

u_int32_t BestBlockSizeFit(u_int32_t allocationBlockSize, u_int32_t blockSizeLimit, u_int32_t baseMultiple) {
    /*
     * Compute the optimal (largest) block size, no larger than allocationBlockSize
     * and no greater than the specified limit, that is still an even multiple of
     * the baseMultiple.
     */
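    /*
     * Worked example (assuming a 4 KiB PAGE_SIZE): with
     * allocationBlockSize = 12288 (12 KiB), the early return below yields
     * 4096; with allocationBlockSize = 3072 and baseMultiple = 512, the
     * loop settles on 3072 (6 * 512), the largest even divisor within the
     * limit.
     */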
    int baseBlockCount, blockCount;
    u_int32_t trialBlockSize;

    if (allocationBlockSize % baseMultiple != 0) {
        /*
         * Whoops: the allocation block size isn't an even multiple of the
         * specified base; no amount of dividing it into even parts will
         * yield one, either!
         */
        return 512;    /* Hope for the best */
    };

    /* Try the obvious winner first, to prevent 12K allocation blocks, for instance,
       from being handled as two 6K logical blocks instead of 3 4K logical blocks.
       Even though the former (the result of the loop below) is the larger allocation
       block size, the latter is more efficient: */
    if (allocationBlockSize % PAGE_SIZE == 0) return (u_int32_t)PAGE_SIZE;

    /* No clear winner exists: pick the largest even fraction <= blockSizeLimit: */
    baseBlockCount = allocationBlockSize / baseMultiple;    /* Now guaranteed to be an even multiple */

    for (blockCount = baseBlockCount; blockCount > 0; --blockCount) {
        trialBlockSize = blockCount * baseMultiple;
        if (allocationBlockSize % trialBlockSize == 0) {    /* An even multiple? */
            if ((trialBlockSize <= blockSizeLimit) &&
                (trialBlockSize % baseMultiple == 0)) {
                return trialBlockSize;
            };
        };
    };

    /* Note: we should never get here, since blockCount = 1 should always work,
       but this is nice and safe and makes the compiler happy, too ... */
    return 512;
}

/*
 * Lock the HFS global journal lock
 */
int
hfs_lock_global (struct hfsmount *hfsmp, enum hfs_locktype locktype)
{
    pthread_t thread = pthread_self();

    if (hfsmp->hfs_global_lockowner == thread) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_lock_global: locking against myself!");
        hfs_assert(0);
    }

    if (locktype == HFS_SHARED_LOCK) {
        lf_lck_rw_lock_shared (&hfsmp->hfs_global_lock);
        hfsmp->hfs_global_lockowner = HFS_SHARED_OWNER;
    }
    else {
        lf_lck_rw_lock_exclusive (&hfsmp->hfs_global_lock);
        hfsmp->hfs_global_lockowner = thread;
    }

    return 0;
}

/*
 * Unlock the HFS global journal lock
 */
void
hfs_unlock_global (struct hfsmount *hfsmp)
{
    pthread_t thread = pthread_self();

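    /*
     * The mode to release is inferred from hfs_global_lockowner: only an
     * exclusive holder records its own thread there (shared holders record
     * HFS_SHARED_OWNER in hfs_lock_global), so a match means exclusive.
     */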
    /* HFS_LOCK_EXCLUSIVE */
    if (hfsmp->hfs_global_lockowner == thread) {
        hfsmp->hfs_global_lockowner = NULL;
        lf_lck_rw_unlock_exclusive(&hfsmp->hfs_global_lock);
    }
    /* HFS_LOCK_SHARED */
    else {
        lf_lck_rw_unlock_shared(&hfsmp->hfs_global_lock);
    }
}

int
hfs_start_transaction(struct hfsmount *hfsmp)
{
    int ret = 0, unlock_on_err = 0;
    pthread_t thread = pthread_self();

#ifdef HFS_CHECK_LOCK_ORDER
    /*
     * You cannot start a transaction while holding a system
     * file lock. (unless the transaction is nested.)
     */
    if (hfsmp->jnl && journal_owner(hfsmp->jnl) != thread) {
        if (hfsmp->hfs_catalog_cp && hfsmp->hfs_catalog_cp->c_lockowner == thread) {
            LFHFS_LOG(LEVEL_ERROR, "hfs_start_transaction: bad lock order (cat before jnl)\n");
            hfs_assert(0);
        }
        if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == thread) {
            LFHFS_LOG(LEVEL_ERROR, "hfs_start_transaction: bad lock order (attr before jnl)\n");
            hfs_assert(0);
        }
        if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == thread) {
            LFHFS_LOG(LEVEL_ERROR, "hfs_start_transaction: bad lock order (ext before jnl)\n");
            hfs_assert(0);
        }
    }
#endif /* HFS_CHECK_LOCK_ORDER */

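    /*
     * Take the global lock before starting the transaction: shared while a
     * journal is active (so it cannot be disabled underneath us), exclusive
     * when running without one. If the journal state changed while we were
     * blocked on the lock, drop it and retry from the top.
     */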
again:

    if (hfsmp->jnl) {
        if (journal_owner(hfsmp->jnl) != thread)
        {
            /*
             * The global lock should be held shared if journal is
             * active to prevent disabling. If we're not the owner
             * of the journal lock, verify that we're not already
             * holding the global lock exclusive before moving on.
             */
            if (hfsmp->hfs_global_lockowner == thread) {
                ret = EBUSY;
                goto out;
            }

            hfs_lock_global (hfsmp, HFS_SHARED_LOCK);

            // Things could have changed
            if (!hfsmp->jnl) {
                hfs_unlock_global(hfsmp);
                goto again;
            }
            unlock_on_err = 1;
        }
    }
    else
    {
        // No journal
        if (hfsmp->hfs_global_lockowner != thread) {
            hfs_lock_global(hfsmp, HFS_EXCLUSIVE_LOCK);

            // Things could have changed
            if (hfsmp->jnl) {
                hfs_unlock_global(hfsmp);
                goto again;
            }

            ExtendedVCB * vcb = HFSTOVCB(hfsmp);
            if (vcb->vcbAtrb & kHFSVolumeUnmountedMask) {
                // clear kHFSVolumeUnmountedMask
                hfs_flushvolumeheader(hfsmp, HFS_FVH_SKIP_TRANSACTION);
            }
            unlock_on_err = 1;
        }
    }

    if (hfsmp->jnl)
    {
        ret = journal_start_transaction(hfsmp->jnl);
    }
    else
    {
        ret = 0;
    }

    if (ret == 0)
        ++hfsmp->hfs_transaction_nesting;

out:
    if (ret != 0 && unlock_on_err) {
        hfs_unlock_global (hfsmp);
    }

    return ret;
}

int
hfs_end_transaction(struct hfsmount *hfsmp)
{
    int ret;

    hfs_assert(!hfsmp->jnl || journal_owner(hfsmp->jnl) == pthread_self());
    hfs_assert(hfsmp->hfs_transaction_nesting > 0);

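    /* Flush a dirty volume header while the outermost transaction is still
     * open, so the update is captured by this journal transaction. */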
    if (hfsmp->jnl && hfsmp->hfs_transaction_nesting == 1)
        hfs_flushvolumeheader(hfsmp, HFS_FVH_FLUSH_IF_DIRTY);

    bool need_unlock = !--hfsmp->hfs_transaction_nesting;

    if (hfsmp->jnl)
    {
        ret = journal_end_transaction(hfsmp->jnl);
    }
    else
    {
        ret = 0;
    }

    if (need_unlock) {
        hfs_unlock_global (hfsmp);
    }

    return ret;
}


/*
 * Flush the contents of the journal to the disk.
 *
 *  - HFS_FLUSH_JOURNAL
 *      Wait to write the in-memory journal to the disk consistently.
 *      This means that the journal still contains uncommitted
 *      transactions and the file system metadata blocks in
 *      the journal transactions might be written asynchronously
 *      to the disk. But there is no guarantee that they are
 *      written to the disk before returning to the caller.
 *      Note that this option is sufficient for file system
 *      data integrity as it guarantees consistent journal
 *      content on the disk.
 *
 *  - HFS_FLUSH_JOURNAL_META
 *      Wait to write the in-memory journal to the disk
 *      consistently, and also wait to write all asynchronous
 *      metadata blocks to their corresponding locations
 *      consistently on the disk. This is overkill in normal
 *      scenarios but is useful whenever the metadata blocks
 *      are required to be consistent on-disk instead of
 *      just the journal being consistent, e.g., before live
 *      verification and live volume resizing. The update of the
 *      metadata doesn't include a barrier of track cache flush.
 *
 *  - HFS_FLUSH_FULL
 *      HFS_FLUSH_JOURNAL + force a track cache flush to media
 *
 *  - HFS_FLUSH_CACHE
 *      Force a track cache flush to media.
 *
 *  - HFS_FLUSH_BARRIER
 *      Barrier-only flush to ensure write order
 *
 */
errno_t hfs_flush(struct hfsmount *hfsmp, hfs_flush_mode_t mode) {
    errno_t error = 0;
    int options = 0;
    dk_synchronize_t sync_req = { .options = DK_SYNCHRONIZE_OPTION_BARRIER };

    switch (mode) {
        case HFS_FLUSH_JOURNAL_META:
            // wait for journal, metadata blocks and previous async flush to finish
            SET(options, JOURNAL_WAIT_FOR_IO);

            // no break

        case HFS_FLUSH_JOURNAL:
        case HFS_FLUSH_JOURNAL_BARRIER:
        case HFS_FLUSH_FULL:

            if (mode == HFS_FLUSH_JOURNAL_BARRIER &&
                !(hfsmp->hfs_flags & HFS_FEATURE_BARRIER))
                mode = HFS_FLUSH_FULL;

            if (mode == HFS_FLUSH_FULL)
                SET(options, JOURNAL_FLUSH_FULL);

            /* Only peek at hfsmp->jnl while holding the global lock */
            hfs_lock_global (hfsmp, HFS_SHARED_LOCK);

            if (hfsmp->jnl) {
                ExtendedVCB * vcb = HFSTOVCB(hfsmp);
                if (!(vcb->vcbAtrb & kHFSVolumeUnmountedMask)) {
                    // Set kHFSVolumeUnmountedMask
                    hfs_flushvolumeheader(hfsmp, HFS_FVH_MARK_UNMOUNT);
                }
                error = journal_flush(hfsmp->jnl, options);
            }

            hfs_unlock_global (hfsmp);

            /*
             * This may result in a double barrier as
             * journal_flush may have issued a barrier itself
             */
            if (mode == HFS_FLUSH_JOURNAL_BARRIER)
                error = ioctl(hfsmp->hfs_devvp->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_req);
            break;

        case HFS_FLUSH_CACHE:
            // Do a full sync
            sync_req.options = 0;

            // no break

        case HFS_FLUSH_BARRIER:
            // If a barrier-only flush isn't supported, fall back to a full flush.
            if (!(hfsmp->hfs_flags & HFS_FEATURE_BARRIER))
                sync_req.options = 0;

            error = ioctl(hfsmp->hfs_devvp->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_req);
            break;

        default:
            error = EINVAL;
    }

    return error;
}


#define MALLOC_TRACER 0

#if MALLOC_TRACER
#define MALLOC_TRACER_SIZE 100000
typedef struct {
    void   *pv;
    size_t uSize;
} MallocTracer_S;
MallocTracer_S gpsMallocTracer[MALLOC_TRACER_SIZE];
MallocTracer_S gpsFreeTracer[MALLOC_TRACER_SIZE];
uint32_t guIndex = 0, guOutdex = 0, guSize = 0, guTotal = 0;
uint64_t guTotalConsumption = 0;
#endif

void*
hfs_malloc(size_t size)
{
    if (!size) {
        panic("Malloc size is 0");
    }
    void *pv = malloc(size);

#if MALLOC_TRACER
    gpsMallocTracer[guIndex].pv = pv;
    gpsMallocTracer[guIndex].uSize = (uint32_t)size;
    guIndex = (guIndex+1) % MALLOC_TRACER_SIZE;
    guTotal++;
    guSize++;
    guTotalConsumption += size;
#endif
    return pv;
}

void
hfs_free(void *ptr)
{
    if (!ptr)
        return;

    free(ptr);

#if MALLOC_TRACER
    gpsFreeTracer[guOutdex].pv = ptr;
    bool bCont = true;
    uint32_t u = guIndex;
    do {
        u = (u) ? (u-1) : (MALLOC_TRACER_SIZE-1);
        if (gpsMallocTracer[u].pv == ptr) {
            break;
        }
        bCont = (guTotal < MALLOC_TRACER_SIZE) ? (u) : (u != guIndex);
    } while( bCont );

    if (!bCont) {
        panic("undetectable free");
        assert(0);
    }
    //gpsFreeTracer[guOutdex].uSize = gpsMallocTracer[u].uSize;
    //gpsFreeTracer[guOutdex].uSize = guSize;
    gpsFreeTracer[guOutdex].uSize = guIndex;

    guOutdex = (guOutdex+1) % MALLOC_TRACER_SIZE;
    guSize--;
    guTotalConsumption -= gpsMallocTracer[u].uSize;
#endif
}

void*
hfs_mallocz(size_t size)
{
    void *ptr = hfs_malloc(size);
    if ( ptr == NULL )
        return ptr;
    bzero(ptr, size);
    return ptr;
}

/*
 * Lock the HFS mount lock
 *
 * Note: this is a mutex, not a rw lock!
 */
void
hfs_lock_mount (struct hfsmount *hfsmp)
{
    lf_lck_mtx_lock (&(hfsmp->hfs_mutex));
}

/*
 * Unlock the HFS mount lock
 *
 * Note: this is a mutex, not a rw lock!
 */
void hfs_unlock_mount (struct hfsmount *hfsmp)
{
    lf_lck_mtx_unlock (&(hfsmp->hfs_mutex));
}

/*
 * ReleaseMetaFileVNode
 *
 * vp    L - -
 */
static void ReleaseMetaFileVNode(struct vnode *vp)
{
    struct filefork *fp;

    if (vp && (fp = VTOF(vp)))
    {
        if (fp->fcbBTCBPtr != NULL)
        {
            (void)hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
            (void) BTClosePath(fp);
            hfs_unlock(VTOC(vp));
        }

        /* release the node even if BTClosePath fails */
        hfs_vnop_reclaim(vp);
    }
}

/*************************************************************
 *
 * Unmounts an HFS volume.
 * At this point vflush() has been called (to dump all non-metadata files).
 *
 *************************************************************/

int
hfsUnmount( register struct hfsmount *hfsmp)
{
    /* Get rid of our attribute data vnode (if any). This is done
     * after the vflush() during unmount, so we don't need to worry
     * about any locks.
     */
    if (hfsmp->hfs_attrdata_vp) {
        ReleaseMetaFileVNode(hfsmp->hfs_attrdata_vp);
        hfsmp->hfs_attrdata_vp = NULL;
    }

    if (hfsmp->hfs_startup_vp) {
        ReleaseMetaFileVNode(hfsmp->hfs_startup_vp);
        hfsmp->hfs_startup_cp = NULL;
        hfsmp->hfs_startup_vp = NULL;
    }

    if (hfsmp->hfs_attribute_vp) {
        ReleaseMetaFileVNode(hfsmp->hfs_attribute_vp);
        hfsmp->hfs_attribute_cp = NULL;
        hfsmp->hfs_attribute_vp = NULL;
    }

    if (hfsmp->hfs_catalog_vp) {
        ReleaseMetaFileVNode(hfsmp->hfs_catalog_vp);
        hfsmp->hfs_catalog_cp = NULL;
        hfsmp->hfs_catalog_vp = NULL;
    }

    if (hfsmp->hfs_extents_vp) {
        ReleaseMetaFileVNode(hfsmp->hfs_extents_vp);
        hfsmp->hfs_extents_cp = NULL;
        hfsmp->hfs_extents_vp = NULL;
    }

    if (hfsmp->hfs_allocation_vp) {
        ReleaseMetaFileVNode(hfsmp->hfs_allocation_vp);
        hfsmp->hfs_allocation_cp = NULL;
        hfsmp->hfs_allocation_vp = NULL;
    }
    return (0);
}

/*
 * RequireFileLock
 *
 * Check to see if a vnode is locked in the current context.
 * This is to be used for debugging purposes only!!
 */
void RequireFileLock(FileReference vp, int shareable)
{
    int locked;

    /* The extents btree and allocation bitmap are always exclusive. */
    if (VTOC(vp)->c_fileid == kHFSExtentsFileID ||
        VTOC(vp)->c_fileid == kHFSAllocationFileID) {
        shareable = 0;
    }

    locked = VTOC(vp)->c_lockowner == pthread_self();

    if (!locked && !shareable)
    {
        switch (VTOC(vp)->c_fileid) {
            case kHFSExtentsFileID:
                LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: extents btree not locked! v: 0x%08X\n #\n", (u_int)vp);
                break;
            case kHFSCatalogFileID:
                LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: catalog btree not locked! v: 0x%08X\n #\n", (u_int)vp);
                break;
            case kHFSAllocationFileID:
                /* The allocation file can hide behind the journal lock. */
                if (VTOHFS(vp)->jnl == NULL)
                {
                    LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: allocation file not locked! v: 0x%08X\n #\n", (u_int)vp);
                }
                return;
            case kHFSStartupFileID:
                LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: startup file not locked! v: 0x%08X\n #\n", (u_int)vp);
                break;
            case kHFSAttributesFileID:
                LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: attributes btree not locked! v: 0x%08X\n #\n", (u_int)vp);
                break;
            default:
                return;
        }
        hfs_assert(0);
    }
}

/*
 * Test if fork has overflow extents.
 *
 * Returns:
 *    non-zero - overflow extents exist
 *    zero     - overflow extents do not exist
 */
bool overflow_extents(struct filefork *fp)
{
    u_int32_t blocks;

    if (fp->ff_extents[7].blockCount == 0)
        return false;

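    /* All eight inline extents are in use; the fork spills into the extents
     * overflow B-tree only if it owns more blocks than those extents map. */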
    blocks = fp->ff_extents[0].blockCount +
             fp->ff_extents[1].blockCount +
             fp->ff_extents[2].blockCount +
             fp->ff_extents[3].blockCount +
             fp->ff_extents[4].blockCount +
             fp->ff_extents[5].blockCount +
             fp->ff_extents[6].blockCount +
             fp->ff_extents[7].blockCount;

    return fp->ff_blocks > blocks;
}


/*
 * Lock HFS system file(s).
 *
 * This function accepts a @flags parameter which indicates which
 * system file locks are required. The value it returns should be
 * used in a subsequent call to hfs_systemfile_unlock. The caller
 * should treat this value as opaque; it may or may not have a
 * relation to the @flags field that is passed in. The *only*
 * guarantee that we make is that a value of zero means that no locks
 * were taken and that there is no need to call hfs_systemfile_unlock
 * (although it is harmless to do so). Recursion is supported but
 * care must still be taken to ensure correct lock ordering. Note
 * that requests for certain locks may cause other locks to also be
 * taken, including locks that are not possible to ask for via the
 * @flags parameter.
 */
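/*
 * Typical usage (a sketch):
 *
 *     int lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
 *     // ... operate on the catalog B-tree ...
 *     hfs_systemfile_unlock(hfsmp, lockflags);
 */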
int
hfs_systemfile_lock(struct hfsmount *hfsmp, int flags, enum hfs_locktype locktype)
{
    pthread_t thread = pthread_self();

    /*
     * Locking order is Catalog file, Attributes file, Startup file, Bitmap file, Extents file
     */
    if (flags & SFL_CATALOG) {
        if (hfsmp->hfs_catalog_cp
            && hfsmp->hfs_catalog_cp->c_lockowner != thread) {
#ifdef HFS_CHECK_LOCK_ORDER
            if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == current_thread()) {
                LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Attributes before Catalog)");
                hfs_assert(0);
            }
            if (hfsmp->hfs_startup_cp && hfsmp->hfs_startup_cp->c_lockowner == current_thread()) {
                LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Startup before Catalog)");
                hfs_assert(0);
            }
            if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == current_thread()) {
                LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Extents before Catalog)");
                hfs_assert(0);
            }
#endif /* HFS_CHECK_LOCK_ORDER */

            (void) hfs_lock(hfsmp->hfs_catalog_cp, locktype, HFS_LOCK_DEFAULT);
            /*
             * When the catalog file has overflow extents then
             * also acquire the extents b-tree lock if it's not
             * already requested.
             */
            if (((flags & SFL_EXTENTS) == 0) &&
                (hfsmp->hfs_catalog_vp != NULL) &&
                (overflow_extents(VTOF(hfsmp->hfs_catalog_vp)))) {
                flags |= SFL_EXTENTS;
            }
        } else {
            flags &= ~SFL_CATALOG;
        }
    }

    if (flags & SFL_ATTRIBUTE) {
        if (hfsmp->hfs_attribute_cp
            && hfsmp->hfs_attribute_cp->c_lockowner != thread) {
#ifdef HFS_CHECK_LOCK_ORDER
            if (hfsmp->hfs_startup_cp && hfsmp->hfs_startup_cp->c_lockowner == current_thread()) {
                LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Startup before Attributes)");
                hfs_assert(0);
            }
            if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == current_thread()) {
                LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Extents before Attributes)");
                hfs_assert(0);
            }
#endif /* HFS_CHECK_LOCK_ORDER */

            (void) hfs_lock(hfsmp->hfs_attribute_cp, locktype, HFS_LOCK_DEFAULT);
            /*
             * When the attribute file has overflow extents then
             * also acquire the extents b-tree lock if it's not
             * already requested.
             */
            if (((flags & SFL_EXTENTS) == 0) &&
                (hfsmp->hfs_attribute_vp != NULL) &&
                (overflow_extents(VTOF(hfsmp->hfs_attribute_vp)))) {
                flags |= SFL_EXTENTS;
            }
        } else {
            flags &= ~SFL_ATTRIBUTE;
        }
    }

    if (flags & SFL_STARTUP) {
        if (hfsmp->hfs_startup_cp
            && hfsmp->hfs_startup_cp->c_lockowner != thread) {
#ifdef HFS_CHECK_LOCK_ORDER
            if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == current_thread()) {
                LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Extents before Startup)");
                hfs_assert(0);
            }
#endif /* HFS_CHECK_LOCK_ORDER */

            (void) hfs_lock(hfsmp->hfs_startup_cp, locktype, HFS_LOCK_DEFAULT);
            /*
             * When the startup file has overflow extents then
             * also acquire the extents b-tree lock if it's not
             * already requested.
             */
            if (((flags & SFL_EXTENTS) == 0) &&
                (hfsmp->hfs_startup_vp != NULL) &&
                (overflow_extents(VTOF(hfsmp->hfs_startup_vp)))) {
                flags |= SFL_EXTENTS;
            }
        } else {
            flags &= ~SFL_STARTUP;
        }
    }

    /*
     * To prevent locks being taken in the wrong order, the extent lock
     * gets a bitmap lock as well.
     */
    if (flags & (SFL_BITMAP | SFL_EXTENTS)) {
        if (hfsmp->hfs_allocation_cp) {
            (void) hfs_lock(hfsmp->hfs_allocation_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
            /*
             * The bitmap lock is also grabbed when only the extent lock
             * was requested. Set the bitmap lock bit in the lock
             * flags which callers will use during unlock.
             */
            flags |= SFL_BITMAP;
        } else {
            flags &= ~SFL_BITMAP;
        }
    }

    if (flags & SFL_EXTENTS) {
        /*
         * Since the extents btree lock is recursive we always
         * need exclusive access.
         */
        if (hfsmp->hfs_extents_cp) {
            (void) hfs_lock(hfsmp->hfs_extents_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
        } else {
            flags &= ~SFL_EXTENTS;
        }
    }

    return (flags);
}

/*
 * Unlock HFS system file(s).
 */
void
hfs_systemfile_unlock(struct hfsmount *hfsmp, int flags)
{
    if (!flags)
        return;

    if (flags & SFL_STARTUP && hfsmp->hfs_startup_cp) {
        hfs_unlock(hfsmp->hfs_startup_cp);
    }
    if (flags & SFL_ATTRIBUTE && hfsmp->hfs_attribute_cp) {
        hfs_unlock(hfsmp->hfs_attribute_cp);
    }
    if (flags & SFL_CATALOG && hfsmp->hfs_catalog_cp) {
        hfs_unlock(hfsmp->hfs_catalog_cp);
    }
    if (flags & SFL_BITMAP && hfsmp->hfs_allocation_cp) {
        hfs_unlock(hfsmp->hfs_allocation_cp);
    }
    if (flags & SFL_EXTENTS && hfsmp->hfs_extents_cp) {
        hfs_unlock(hfsmp->hfs_extents_cp);
    }
}

u_int32_t
hfs_freeblks(struct hfsmount * hfsmp, int wantreserve)
{
    u_int32_t freeblks;
    u_int32_t rsrvblks;
    u_int32_t loanblks;

    /*
     * We don't bother taking the mount lock
     * to look at these values since the values
     * themselves are each updated atomically
     * on aligned addresses.
     */
    freeblks = hfsmp->freeBlocks;
    rsrvblks = hfsmp->reserveBlocks;
    loanblks = hfsmp->loanedBlocks + hfsmp->lockedBlocks;
    if (wantreserve) {
        if (freeblks > rsrvblks)
            freeblks -= rsrvblks;
        else
            freeblks = 0;
    }
    if (freeblks > loanblks)
        freeblks -= loanblks;
    else
        freeblks = 0;

    return (freeblks);
}

/*
 * Map HFS Common errors (negative) to BSD error codes (positive).
 * Positive errors (i.e., BSD errors) are passed through unchanged.
 */
short MacToVFSError(OSErr err)
{
    if (err >= 0)
        return err;

    /* BSD/VFS internal errnos */
    switch (err) {
        case HFS_ERESERVEDNAME: /* -8 */
            return err;
    }

    switch (err) {
        case dskFulErr:          /* -34 */
        case btNoSpaceAvail:     /* -32733 */
            return ENOSPC;
        case fxOvFlErr:          /* -32750 */
            return EOVERFLOW;

        case btBadNode:          /* -32731 */
            return EIO;

        case memFullErr:         /* -108 */
            return ENOMEM;       /* +12 */

        case cmExists:           /* -32718 */
        case btExists:           /* -32734 */
            return EEXIST;       /* +17 */

        case cmNotFound:         /* -32719 */
        case btNotFound:         /* -32735 */
            return ENOENT;       /* +2 */

        case cmNotEmpty:         /* -32717 */
            return ENOTEMPTY;    /* +66 */

        case cmFThdDirErr:       /* -32714 */
            return EISDIR;       /* +21 */

        case fxRangeErr:         /* -32751 */
            return ERANGE;

        case bdNamErr:           /* -37 */
            return ENAMETOOLONG; /* +63 */

        case paramErr:           /* -50 */
        case fileBoundsErr:      /* -1309 */
            return EINVAL;       /* +22 */

        case fsBTBadNodeSize:
            return ENXIO;

        default:
            return EIO;          /* +5 */
    }
}

/*
 * Find the current thread's directory hint for a given index.
 *
 * Requires an exclusive lock on the directory cnode.
 *
 * Use detach if the cnode lock must be dropped while the hint is still active.
 */
directoryhint_t*
hfs_getdirhint(struct cnode *dcp, int index, int detach)
{
    directoryhint_t *hint;
    boolean_t need_remove, need_init;
    const u_int8_t *name;
    struct timeval tv;
    microtime(&tv);

    /*
     * Look for an existing hint first. If not found, create a new one (when
     * the list is not full) or recycle the oldest hint. Since new hints are
     * always added to the head of the list, the last hint is always the
     * oldest.
     */
    TAILQ_FOREACH(hint, &dcp->c_hintlist, dh_link)
    {
        if (hint->dh_index == index)
            break;
    }
    if (hint != NULL)
    {   /* found an existing hint */
        need_init = false;
        need_remove = true;
    }
    else
    {   /* cannot find an existing hint */
        need_init = true;
        if (dcp->c_dirhintcnt < HFS_MAXDIRHINTS)
        {   /* we don't need recycling */
            /* Create a default directory hint */
            hint = hfs_malloc(sizeof(struct directoryhint));
            ++dcp->c_dirhintcnt;
            need_remove = false;
        }
        else
        {
            /* recycle the last (i.e., the oldest) hint */
            hint = TAILQ_LAST(&dcp->c_hintlist, hfs_hinthead);
            if ((hint->dh_desc.cd_flags & CD_HASBUF) && (name = hint->dh_desc.cd_nameptr))
            {
                hint->dh_desc.cd_nameptr = NULL;
                hint->dh_desc.cd_namelen = 0;
                hint->dh_desc.cd_flags &= ~CD_HASBUF;
                hfs_free((void*)name);
            }
            need_remove = true;
        }
    }

    if (need_remove)
        TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link);

    if (detach)
        --dcp->c_dirhintcnt;
    else
        TAILQ_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link);

    if (need_init)
    {
        hint->dh_index = index;
        hint->dh_desc.cd_flags = 0;
        hint->dh_desc.cd_encoding = 0;
        hint->dh_desc.cd_namelen = 0;
        hint->dh_desc.cd_nameptr = NULL;
        hint->dh_desc.cd_parentcnid = dcp->c_fileid;
        hint->dh_desc.cd_hint = dcp->c_childhint;
        hint->dh_desc.cd_cnid = 0;
    }
    hint->dh_time = (uint32_t) tv.tv_sec;
    return (hint);
}

/*
 * Insert a detached directory hint back into the list of dirhints.
 *
 * Requires an exclusive lock on the directory cnode.
 */
void
hfs_insertdirhint(struct cnode *dcp, directoryhint_t * hint)
{
    directoryhint_t *test;

    TAILQ_FOREACH(test, &dcp->c_hintlist, dh_link)
    {
        if (test == hint)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_insertdirhint: hint %p already on list!", hint);
            hfs_assert(0);
        }
    }

    TAILQ_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link);
    ++dcp->c_dirhintcnt;
}

/*
 * Release a single directory hint.
 *
 * Requires an exclusive lock on the directory cnode.
 */
void
hfs_reldirhint(struct cnode *dcp, directoryhint_t * relhint)
{
    const u_int8_t *name;
    directoryhint_t *hint;

    /* Check if item is on list (could be detached) */
    TAILQ_FOREACH(hint, &dcp->c_hintlist, dh_link)
    {
        if (hint == relhint)
        {
            TAILQ_REMOVE(&dcp->c_hintlist, relhint, dh_link);
            --dcp->c_dirhintcnt;
            break;
        }
    }
    name = relhint->dh_desc.cd_nameptr;
    if ((relhint->dh_desc.cd_flags & CD_HASBUF) && (name != NULL))
    {
        relhint->dh_desc.cd_nameptr = NULL;
        relhint->dh_desc.cd_namelen = 0;
        relhint->dh_desc.cd_flags &= ~CD_HASBUF;
        hfs_free((void*)name);
    }
    hfs_free(relhint);
}

/*
 * Perform a case-insensitive compare of two UTF-8 filenames.
 *
 * Returns 0 if the strings match.
 */
int
hfs_namecmp(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2)
{
    u_int16_t *ustr1, *ustr2;
    size_t ulen1, ulen2;
    size_t maxbytes;
    int cmp = -1;

    if (len1 != len2)
        return (cmp);

    maxbytes = kHFSPlusMaxFileNameChars << 1;
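    /* A single allocation backs both Unicode buffers: ustr2 starts
     * (maxbytes >> 1) UniChars (i.e., maxbytes bytes) past ustr1. */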
    ustr1 = hfs_malloc(maxbytes << 1);
    ustr2 = ustr1 + (maxbytes >> 1);

    if (utf8_decodestr(str1, len1, ustr1, &ulen1, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
        goto out;
    if (utf8_decodestr(str2, len2, ustr2, &ulen2, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
        goto out;

    ulen1 = ulen1 / sizeof(UniChar);
    ulen2 = ulen2 / sizeof(UniChar);
    cmp = FastUnicodeCompare(ustr1, ulen1, ustr2, ulen2);
out:
    hfs_free(ustr1);
    return (cmp);
}
1750
1751 /*
1752 * Perform a case-insensitive apendix cmp of two UTF-8 filenames.
1753 *
1754 * Returns 0 if the str2 is the same as the end of str1.
1755 */
1756 int
1757 hfs_apendixcmp(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2)
1758 {
1759 u_int16_t *ustr1, *ustr2, *original_allocation;
1760 size_t ulen1, ulen2;
1761 size_t maxbytes;
1762 int cmp = -1;
1763
1764 maxbytes = kHFSPlusMaxFileNameChars << 1;
1765 ustr1 = hfs_malloc(maxbytes << 1);
1766 ustr2 = ustr1 + (maxbytes >> 1);
1767 original_allocation = ustr1;
1768
1769 if (utf8_decodestr(str1, len1, ustr1, &ulen1, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
1770 goto out;
1771 if (utf8_decodestr(str2, len2, ustr2, &ulen2, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
1772 goto out;
1773
ulen1 = ulen1 / sizeof(UniChar);
ulen2 = ulen2 / sizeof(UniChar);
if (ulen1 < ulen2)
    goto out; /* str2 decodes longer than str1, so it cannot be a suffix */
ustr1 += ulen1 - ulen2; /* point at the candidate suffix within str1 */
cmp = FastUnicodeCompare(ustr1, ulen2, ustr2, ulen2);
1778 out:
1779 hfs_free(original_allocation);
1780 return (cmp);
1781 }
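
/*
 * Example (a sketch): returns 0 because ".txt" is a case-insensitive
 * suffix of "Notes.TXT":
 *
 *     hfs_apendixcmp((const u_int8_t *)"Notes.TXT", 9,
 *                    (const u_int8_t *)".txt", 4);
 */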
1782
/*
 * Perform a case-insensitive substring (strstr-style) search of two UTF-8
 * filenames.
 *
 * Returns 0 if str2 occurs within str1.
 */
1788 int
1789 hfs_strstr(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2)
1790 {
1791 u_int16_t *ustr1, *ustr2, *original_allocation;
1792 size_t ulen1, ulen2;
1793 size_t maxbytes;
1794 int cmp = 0;
1795
1796 maxbytes = kHFSPlusMaxFileNameChars << 1;
1797 ustr1 = hfs_malloc(maxbytes << 1);
1798 ustr2 = ustr1 + (maxbytes >> 1);
1799 original_allocation = ustr1;
1800 if (utf8_decodestr(str1, len1, ustr1, &ulen1, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
1801 {
1802 goto out;
1803 }
1804 if (utf8_decodestr(str2, len2, ustr2, &ulen2, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
1805 {
1806 goto out;
1807 }
1808
1809 ulen1 = ulen1 / sizeof(UniChar);
1810 ulen2 = ulen2 / sizeof(UniChar);
1811
1812 do {
1813 if (ulen1-- < ulen2)
1814 {
1815 cmp = 1;
1816 break;
1817 }
1818 } while (FastUnicodeCompare(ustr1++, ulen2, ustr2, ulen2) != 0);
1819
1820 out:
1821 hfs_free(original_allocation);
1822 return cmp;
1823 }
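
/*
 * Example (a sketch): returns 0 because "port" occurs, ignoring case,
 * inside "Reports.pdf":
 *
 *     hfs_strstr((const u_int8_t *)"Reports.pdf", 11,
 *                (const u_int8_t *)"PORT", 4);
 */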
1824
1825 /*
 * Release directory hints for a given directory.
1827 *
1828 * Requires an exclusive lock on directory cnode.
1829 */
1830 void
1831 hfs_reldirhints(struct cnode *dcp, int stale_hints_only)
1832 {
1833 struct timeval tv;
1834 directoryhint_t *hint, *prev;
1835 const u_int8_t * name;
1836
1837 if (stale_hints_only)
1838 microuptime(&tv);
1839
1840 /* searching from the oldest to the newest, so we can stop early when releasing stale hints only */
1841 TAILQ_FOREACH_REVERSE_SAFE(hint, &dcp->c_hintlist, hfs_hinthead, dh_link, prev) {
1842 if (stale_hints_only && (tv.tv_sec - hint->dh_time) < HFS_DIRHINT_TTL)
1843 break; /* stop here if this entry is too new */
1844 name = hint->dh_desc.cd_nameptr;
1845 if ((hint->dh_desc.cd_flags & CD_HASBUF) && (name != NULL)) {
1846 hint->dh_desc.cd_nameptr = NULL;
1847 hint->dh_desc.cd_namelen = 0;
1848 hint->dh_desc.cd_flags &= ~CD_HASBUF;
1849 hfs_free((void *)name);
1850 }
1851 TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link);
1852 hfs_free(hint);
1853 --dcp->c_dirhintcnt;
1854 }
1855 }
1856
1857 /* hfs_erase_unused_nodes
1858 *
 * Check whether a volume may suffer from unused Catalog B-tree nodes that
1860 * are not zeroed (due to <rdar://problem/6947811>). If so, just write
1861 * zeroes to the unused nodes.
1862 *
1863 * How do we detect when a volume needs this repair? We can't always be
1864 * certain. If a volume was created after a certain date, then it may have
1865 * been created with the faulty newfs_hfs. Since newfs_hfs only created one
 * clump, a Catalog B-tree larger than its clump size means the entire first
 * clump was written to at some point, so there shouldn't be unused and
 * unwritten nodes in that clump, and this repair is not needed.
1870 *
1871 * We have defined a bit in the Volume Header's attributes to indicate when the
1872 * unused nodes have been repaired. A newer newfs_hfs will set this bit.
1873 * As will fsck_hfs when it repairs the unused nodes.
1874 */
1875 int hfs_erase_unused_nodes(struct hfsmount *hfsmp)
1876 {
1877 int result;
1878 struct filefork *catalog;
1879 int lockflags;
1880
1881 if (hfsmp->vcbAtrb & kHFSUnusedNodeFixMask)
1882 {
1883 /* This volume has already been checked and repaired. */
1884 return 0;
1885 }
1886
if (hfsmp->localCreateDate < kHFSUnusedNodesFixDate)
1888 {
1889 /* This volume is too old to have had the problem. */
1890 hfsmp->vcbAtrb |= kHFSUnusedNodeFixMask;
1891 return 0;
1892 }
1893
1894 catalog = hfsmp->hfs_catalog_cp->c_datafork;
1895 if (catalog->ff_size > catalog->ff_clumpsize)
1896 {
1897 /* The entire first clump must have been in use at some point. */
1898 hfsmp->vcbAtrb |= kHFSUnusedNodeFixMask;
1899 return 0;
1900 }
1901
1902 /*
1903 * If we get here, we need to zero out those unused nodes.
1904 *
1905 * We start a transaction and lock the catalog since we're going to be
 * making on-disk changes. But note that BTZeroUnusedNodes doesn't actually
1907 * do its writing via the journal, because that would be too much I/O
1908 * to fit in a transaction, and it's a pain to break it up into multiple
1909 * transactions. (It behaves more like growing a B-tree would.)
1910 */
1911 LFHFS_LOG(LEVEL_DEBUG, "hfs_erase_unused_nodes: updating volume %s.\n", hfsmp->vcbVN);
1912 result = hfs_start_transaction(hfsmp);
1913 if (result)
1914 goto done;
1915 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
1916 result = BTZeroUnusedNodes(catalog);
1917 // vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_erase_unused_nodes");
1918 hfs_systemfile_unlock(hfsmp, lockflags);
1919 hfs_end_transaction(hfsmp);
1920 if (result == 0)
1921 hfsmp->vcbAtrb |= kHFSUnusedNodeFixMask;
1922
1923 LFHFS_LOG(LEVEL_DEBUG, "hfs_erase_unused_nodes: done updating volume %s.\n", hfsmp->vcbVN);
1924
1925 done:
1926 return result;
1927 }
1928
1929 /*
1930 * On HFS Plus Volumes, there can be orphaned files or directories
1931 * These are files or directories that were unlinked while busy.
1932 * If the volume was not cleanly unmounted then some of these may
1933 * have persisted and need to be removed.
1934 */
1935 void
1936 hfs_remove_orphans(struct hfsmount * hfsmp)
1937 {
1938 BTreeIterator * iterator = NULL;
1939 FSBufferDescriptor btdata;
1940 struct HFSPlusCatalogFile filerec;
1941 struct HFSPlusCatalogKey * keyp;
1942 FCB *fcb;
1943 ExtendedVCB *vcb;
1944 char filename[32];
1945 char tempname[32];
1946 size_t namelen;
1947 cat_cookie_t cookie;
1948 int catlock = 0;
1949 int catreserve = 0;
1950 bool started_tr = false;
1951 int lockflags;
1952 int result;
1953 int orphaned_files = 0;
1954 int orphaned_dirs = 0;
1955
1956 bzero(&cookie, sizeof(cookie));
1957
1958 if (hfsmp->hfs_flags & HFS_CLEANED_ORPHANS)
1959 return;
1960
1961 vcb = HFSTOVCB(hfsmp);
1962 fcb = VTOF(hfsmp->hfs_catalog_vp);
1963
1964 btdata.bufferAddress = &filerec;
1965 btdata.itemSize = sizeof(filerec);
1966 btdata.itemCount = 1;
1967
1968 iterator = hfs_mallocz(sizeof(BTreeIterator));
1969 if (iterator == NULL)
1970 return;
1971
1972 /* Build a key to "temp" */
1973 keyp = (HFSPlusCatalogKey*)&iterator->key;
1974 keyp->parentID = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
1975 keyp->nodeName.length = 4; /* "temp" */
1976 keyp->keyLength = kHFSPlusCatalogKeyMinimumLength + keyp->nodeName.length * 2;
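/* key length = fixed key fields (parent CNID + name-length field) plus two bytes per UTF-16 code unit */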
1977 keyp->nodeName.unicode[0] = 't';
1978 keyp->nodeName.unicode[1] = 'e';
1979 keyp->nodeName.unicode[2] = 'm';
1980 keyp->nodeName.unicode[3] = 'p';
1981
1982 /*
1983 * Position the iterator just before the first real temp file/dir.
1984 */
1985 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
1986 (void) BTSearchRecord(fcb, iterator, NULL, NULL, iterator);
1987 hfs_systemfile_unlock(hfsmp, lockflags);
1988
1989 /* Visit all the temp files/dirs in the HFS+ private directory. */
1990 for (;;) {
1991 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
1992 result = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
1993 hfs_systemfile_unlock(hfsmp, lockflags);
1994 if (result)
1995 break;
1996 if (keyp->parentID != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid)
1997 break;
1998
1999 (void) utf8_encodestr(keyp->nodeName.unicode, keyp->nodeName.length * 2,
2000 (u_int8_t *)filename, &namelen, sizeof(filename), 0, UTF_ADD_NULL_TERM);
2001
2002 (void) snprintf(tempname, sizeof(tempname), "%s%d", HFS_DELETE_PREFIX, filerec.fileID);
2003
2004 /*
2005 * Delete all files (and directories) named "tempxxx",
2006 * where xxx is the file's cnid in decimal.
2007 *
2008 */
2009 if (bcmp(tempname, filename, namelen + 1) != 0)
2010 continue;
2011
2012 struct filefork dfork;
2013 struct filefork rfork;
2014 struct cnode cnode;
2015 int mode = 0;
2016
2017 bzero(&dfork, sizeof(dfork));
2018 bzero(&rfork, sizeof(rfork));
2019 bzero(&cnode, sizeof(cnode));
2020
2021 if (hfs_start_transaction(hfsmp) != 0) {
2022 LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: failed to start transaction\n");
2023 goto exit;
2024 }
2025 started_tr = true;
2026
2027 /*
2028 * Reserve some space in the Catalog file.
2029 */
2030 if (cat_preflight(hfsmp, CAT_DELETE, &cookie) != 0) {
2031 LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: cat_preflight failed\n");
2032 goto exit;
2033 }
2034 catreserve = 1;
2035
2036 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2037 catlock = 1;
2038
2039 /* Build a fake cnode */
2040 cat_convertattr(hfsmp, (CatalogRecord *)&filerec, &cnode.c_attr, &dfork.ff_data, &rfork.ff_data);
2041 cnode.c_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2042 cnode.c_desc.cd_nameptr = (const u_int8_t *)filename;
2043 cnode.c_desc.cd_namelen = namelen;
2044 cnode.c_desc.cd_cnid = cnode.c_attr.ca_fileid;
2045 cnode.c_blocks = dfork.ff_blocks + rfork.ff_blocks;
2046
2047 /* Position iterator at previous entry */
2048 if (BTIterateRecord(fcb, kBTreePrevRecord, iterator,
2049 NULL, NULL) != 0) {
2050 break;
2051 }
2052
2053 /* Truncate the file to zero (both forks) */
2054 if (dfork.ff_blocks > 0) {
2055 u_int64_t fsize;
2056
2057 dfork.ff_cp = &cnode;
2058 cnode.c_datafork = &dfork;
2059 cnode.c_rsrcfork = NULL;
2060 fsize = (u_int64_t)dfork.ff_blocks * (u_int64_t)HFSTOVCB(hfsmp)->blockSize;
2061 while (fsize > 0) {
2062 if (fsize > HFS_BIGFILE_SIZE) {
2063 fsize -= HFS_BIGFILE_SIZE;
2064 } else {
2065 fsize = 0;
2066 }
2067
2068 if (TruncateFileC(vcb, (FCB*)&dfork, fsize, 1, 0, cnode.c_attr.ca_fileid, false) != 0) {
2069 LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: error truncating data fork!\n");
2070 break;
2071 }
2072
2073 //
2074 // if we're iteratively truncating this file down,
2075 // then end the transaction and start a new one so
2076 // that no one transaction gets too big.
2077 //
2078 if (fsize > 0) {
2079 /* Drop system file locks before starting
2080 * another transaction to preserve lock order.
2081 */
2082 hfs_systemfile_unlock(hfsmp, lockflags);
2083 catlock = 0;
2084 hfs_end_transaction(hfsmp);
2085
2086 if (hfs_start_transaction(hfsmp) != 0) {
2087 started_tr = false;
2088 goto exit;
2089 }
2090 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2091 catlock = 1;
2092 }
2093 }
2094 }
2095
2096 if (rfork.ff_blocks > 0) {
2097 rfork.ff_cp = &cnode;
2098 cnode.c_datafork = NULL;
2099 cnode.c_rsrcfork = &rfork;
2100 if (TruncateFileC(vcb, (FCB*)&rfork, 0, 1, 1, cnode.c_attr.ca_fileid, false) != 0) {
2101 LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: error truncating rsrc fork!\n");
2102 break;
2103 }
2104 }
2105
2106 // Deal with extended attributes
2107 if (ISSET(cnode.c_attr.ca_recflags, kHFSHasAttributesMask)) {
2108 // hfs_removeallattr uses its own transactions
2109 hfs_systemfile_unlock(hfsmp, lockflags);
2110 catlock = false;
2111 hfs_end_transaction(hfsmp);
2112
2113 hfs_removeallattr(hfsmp, cnode.c_attr.ca_fileid, &started_tr);
2114
2115 if (!started_tr) {
2116 if (hfs_start_transaction(hfsmp) != 0) {
2117 LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans:: failed to start transaction\n");
2118 goto exit;
2119 }
2120 started_tr = true;
2121 }
2122
2123 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2124 }
2125
2126 /* Remove the file or folder record from the Catalog */
2127 if (cat_delete(hfsmp, &cnode.c_desc, &cnode.c_attr) != 0) {
2128 LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: error deleting cat rec for id %d!\n", cnode.c_desc.cd_cnid);
2129 hfs_systemfile_unlock(hfsmp, lockflags);
2130 catlock = 0;
2131 hfs_volupdate(hfsmp, VOL_UPDATE, 0);
2132 break;
2133 }
2134
2135 mode = cnode.c_attr.ca_mode & S_IFMT;
2136
2137 if (mode == S_IFDIR) {
2138 orphaned_dirs++;
2139 }
2140 else {
2141 orphaned_files++;
2142 }
2143
2144 /* Update parent and volume counts */
2145 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--;
2146 if (mode == S_IFDIR) {
2147 DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
2148 }
2149
2150 (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
2151 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
2152
2153 /* Drop locks and end the transaction */
2154 hfs_systemfile_unlock(hfsmp, lockflags);
2155 cat_postflight(hfsmp, &cookie);
2156 catlock = catreserve = 0;
2157
/*
 * Now that the Catalog is unlocked, update the volume info, making
 * sure to differentiate between files and directories.
 */
2162 if (mode == S_IFDIR) {
2163 hfs_volupdate(hfsmp, VOL_RMDIR, 0);
2164 }
else {
2166 hfs_volupdate(hfsmp, VOL_RMFILE, 0);
2167 }
2168
2169 hfs_end_transaction(hfsmp);
2170 started_tr = false;
2171 } /* end for */
2172
2173 exit:
2174
2175 if (orphaned_files > 0 || orphaned_dirs > 0)
LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: Removed %d orphaned/unlinked files and %d directories\n", orphaned_files, orphaned_dirs);
2177
2178 if (catlock) {
2179 hfs_systemfile_unlock(hfsmp, lockflags);
2180 }
2181 if (catreserve) {
2182 cat_postflight(hfsmp, &cookie);
2183 }
2184 if (started_tr) {
2185 hfs_end_transaction(hfsmp);
2186 }
2187
2188 hfs_free(iterator);
2189 hfsmp->hfs_flags |= HFS_CLEANED_ORPHANS;
2190 }
2191
2192
2193 u_int32_t GetFileInfo(ExtendedVCB *vcb, const char *name,
2194 struct cat_attr *fattr, struct cat_fork *forkinfo) {
2195
2196 struct hfsmount * hfsmp;
2197 struct cat_desc jdesc;
2198 int lockflags;
2199 int error;
2200
2201 if (vcb->vcbSigWord != kHFSPlusSigWord)
2202 return (0);
2203
2204 hfsmp = VCBTOHFS(vcb);
2205
2206 memset(&jdesc, 0, sizeof(struct cat_desc));
2207 jdesc.cd_parentcnid = kRootDirID;
2208 jdesc.cd_nameptr = (const u_int8_t *)name;
2209 jdesc.cd_namelen = strlen(name);
2210
2211 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2212 error = cat_lookup(hfsmp, &jdesc, 0, NULL, fattr, forkinfo, NULL);
2213 hfs_systemfile_unlock(hfsmp, lockflags);
2214
if (error == 0) {
    return (fattr->ca_fileid);
}

return (0); /* zero is what callers expect on an error */
2222 }
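
/*
 * Usage sketch: hfs_late_journal_init() below uses this to locate the
 * journal files by name in the root directory:
 *
 *     struct cat_attr jattr;
 *     struct cat_fork jfork;
 *     u_int32_t fid = GetFileInfo(vcb, ".journal", &jattr, &jfork);
 *     if (fid == 0) {
 *         // not found (or lookup failed); the caller disables journaling
 *     }
 */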
2223
2224
2225 int hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp,
2226 void *_args, off_t embeddedOffset, daddr64_t mdb_offset,
2227 HFSMasterDirectoryBlock *mdbp) {
2228
2229 JournalInfoBlock *jibp;
2230 void *bp = NULL;
2231 void *jinfo_bp = NULL;
2232 int sectors_per_fsblock, arg_flags=0, arg_tbufsz=0;
2233 int retval = 0;
2234 uint32_t blksize = hfsmp->hfs_logical_block_size;
2235 struct vnode *devvp;
2236 struct hfs_mount_args *args = _args;
2237 u_int32_t jib_flags;
2238 u_int64_t jib_offset;
2239 u_int64_t jib_size;
2240
2241 devvp = hfsmp->hfs_devvp;
2242
2243 if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS)) {
2244 arg_flags = args->journal_flags;
2245 arg_tbufsz = args->journal_tbuffer_size;
2246 }
2247
2248 sectors_per_fsblock = SWAP_BE32(vhp->blockSize) / blksize;
2249
2250 // Read Journal Info
2251 jinfo_bp = hfs_malloc(hfsmp->hfs_physical_block_size);
2252 if (!jinfo_bp) {
2253 goto cleanup_dev_name;
2254 }
2255
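/*
 * Compute the device-block address of the journal info block:
 * embeddedOffset/blksize skips any embedded-volume (wrapper) prefix, and
 * journalInfoBlock * sectors_per_fsblock converts the allocation-block
 * number into device logical blocks.
 */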
2256 uint32_t ujournalInfoBlock = SWAP_BE32(vhp->journalInfoBlock);
2257 uint64_t u64JournalOffset =
2258 (daddr64_t)((embeddedOffset/blksize) + ((u_int64_t)ujournalInfoBlock*sectors_per_fsblock));
2259 retval = raw_readwrite_read_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size,
2260 jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL);
2261
2262 if (retval) {
2263 goto cleanup_dev_name;
2264 }
2265
2266 jibp = jinfo_bp;
2267 jib_flags = SWAP_BE32(jibp->flags);
2268 jib_size = SWAP_BE64(jibp->size);
2269
2270 if (!(jib_flags & kJIJournalInFSMask)) {
2271 goto cleanup_dev_name;
2272 }
2273
2274 hfsmp->jvp = hfsmp->hfs_devvp;
2275 jib_offset = SWAP_BE64(jibp->offset);
2276
2277 // save this off for the hack-y check in hfs_remove()
2278 hfsmp->jnl_start = jib_offset / SWAP_BE32(vhp->blockSize);
2279 hfsmp->jnl_size = jib_size;
2280
2281 if ((hfsmp->hfs_flags & HFS_READ_ONLY) && (hfsmp->hfs_mp->mnt_flag & MNT_ROOTFS) == 0) {
2282 // if the file system is read-only, check if the journal is empty.
2283 // if it is, then we can allow the mount. otherwise we have to
2284 // return failure.
2285 retval = journal_is_clean(hfsmp->jvp,
2286 jib_offset + embeddedOffset,
2287 jib_size,
2288 devvp,
2289 hfsmp->hfs_logical_block_size,
2290 hfsmp->hfs_mp);
2291
2292 hfsmp->jnl = NULL;
2293
2294 hfs_free(jinfo_bp);
2295 jinfo_bp = NULL;
2296
2297 if (retval) {
2298 LFHFS_LOG(LEVEL_ERROR, "hfs: early journal init: the volume is read-only and journal is dirty. Can not mount volume.\n");
2299 }
2300
2301 goto cleanup_dev_name;
2302 }
2303
2304 if (jib_flags & kJIJournalNeedInitMask) {
2305 LFHFS_LOG(LEVEL_ERROR, "hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2306 jib_offset + embeddedOffset, jib_size);
2307 hfsmp->jnl = journal_create(hfsmp->jvp,
2308 jib_offset + embeddedOffset,
2309 jib_size,
2310 devvp,
2311 blksize,
2312 arg_flags,
2313 arg_tbufsz,
2314 NULL,
2315 hfsmp->hfs_mp,
2316 hfsmp->hfs_mp);
2317
2318 // no need to start a transaction here... if this were to fail
2319 // we'd just re-init it on the next mount.
2320 jib_flags &= ~kJIJournalNeedInitMask;
2321 jibp->flags = SWAP_BE32(jib_flags);
2322 raw_readwrite_write_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size,
2323 jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL);
2324 jinfo_bp = NULL;
2325 jibp = NULL;
2326 } else {
2327 LFHFS_LOG(LEVEL_DEFAULT, "hfs: Opening the journal (jib_offset 0x%llx size 0x%llx vhp_blksize %d)...\n",
2328 jib_offset + embeddedOffset,
2329 jib_size, SWAP_BE32(vhp->blockSize));
2330
2331 hfsmp->jnl = journal_open(hfsmp->jvp,
2332 jib_offset + embeddedOffset,
2333 jib_size,
2334 devvp,
2335 blksize,
2336 arg_flags,
2337 arg_tbufsz,
2338 NULL,
2339 hfsmp->hfs_mp,
2340 hfsmp->hfs_mp);
2341
2342 if (hfsmp->jnl && mdbp) {
2343 // reload the mdb because it could have changed
2344 // if the journal had to be replayed.
2345 if (mdb_offset == 0) {
2346 mdb_offset = (daddr64_t)((embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize));
2347 }
2348
2349 bp = hfs_malloc(hfsmp->hfs_physical_block_size);
2350 if (!bp) {
2351 goto cleanup_dev_name;
2352 }
2353
2354 uint64_t u64MDBOffset = HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys);
2355 retval = raw_readwrite_read_mount(devvp, u64MDBOffset, hfsmp->hfs_physical_block_size, bp, hfsmp->hfs_physical_block_size, NULL, NULL);
2356
2357 if (retval) {
2358 LFHFS_LOG(LEVEL_ERROR, "hfs: failed to reload the mdb after opening the journal (retval %d)!\n", retval);
2359 goto cleanup_dev_name;
2360 }
2361
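/* copy just the 512-byte volume header out of the physical block read above */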
2362 bcopy(bp + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size), mdbp, 512);
2363 }
2364 }
2365
2366 // if we expected the journal to be there and we couldn't
2367 // create it or open it then we have to bail out.
2368 if (hfsmp->jnl == NULL) {
2369 LFHFS_LOG(LEVEL_ERROR, "hfs: early jnl init: failed to open/create the journal (retval %d).\n", retval);
2370 retval = EINVAL;
2371 goto cleanup_dev_name;
2372 }
2373
2374 cleanup_dev_name:
2375 if (bp)
2376 hfs_free(bp);
2377
2378 if (jinfo_bp)
2379 hfs_free(jinfo_bp);
2380
2381 return retval;
2382 }
2383
2384 //
2385 // This function will go and re-locate the .journal_info_block and
2386 // the .journal files in case they moved (which can happen if you
2387 // run Norton SpeedDisk). If we fail to find either file we just
2388 // disable journaling for this volume and return. We turn off the
2389 // journaling bit in the vcb and assume it will get written to disk
// later (if it doesn't, on the next mount we'd do the same thing
// again, which is harmless). If we disable journaling we don't
2392 // return an error so that the volume is still mountable.
2393 //
2394 // If the info we find for the .journal_info_block and .journal files
2395 // isn't what we had stored, we re-set our cached info and proceed
2396 // with opening the journal normally.
2397 //
2398 static int hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_args) {
2399 JournalInfoBlock *jibp;
2400 void *jinfo_bp = NULL;
2401 int sectors_per_fsblock, arg_flags=0, arg_tbufsz=0;
2402 int retval, write_jibp = 0, recreate_journal = 0;
2403 struct vnode *devvp;
2404 struct cat_attr jib_attr, jattr;
2405 struct cat_fork jib_fork, jfork;
2406 ExtendedVCB *vcb;
2407 u_int32_t fid;
2408 struct hfs_mount_args *args = _args;
2409 u_int32_t jib_flags;
2410 u_int64_t jib_offset;
2411 u_int64_t jib_size;
2412
2413 devvp = hfsmp->hfs_devvp;
2414 vcb = HFSTOVCB(hfsmp);
2415
2416 if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS)) {
2417 if (args->journal_disable) {
2418 return 0;
2419 }
2420
2421 arg_flags = args->journal_flags;
2422 arg_tbufsz = args->journal_tbuffer_size;
2423 }
2424
2425 fid = GetFileInfo(vcb, ".journal_info_block", &jib_attr, &jib_fork);
2426 if (fid == 0 || jib_fork.cf_extents[0].startBlock == 0 || jib_fork.cf_size == 0) {
2427 LFHFS_LOG(LEVEL_ERROR, "hfs: can't find the .journal_info_block! disabling journaling (start: %d).\n",
2428 fid ? jib_fork.cf_extents[0].startBlock : 0);
2429 vcb->vcbAtrb &= ~kHFSVolumeJournaledMask;
2430 return 0;
2431 }
2432 hfsmp->hfs_jnlinfoblkid = fid;
2433
2434 // make sure the journal_info_block begins where we think it should.
2435 if (SWAP_BE32(vhp->journalInfoBlock) != jib_fork.cf_extents[0].startBlock) {
2436 LFHFS_LOG(LEVEL_ERROR, "hfs: The journal_info_block moved (was: %d; is: %d). Fixing up\n",
2437 SWAP_BE32(vhp->journalInfoBlock), jib_fork.cf_extents[0].startBlock);
2438
2439 vcb->vcbJinfoBlock = jib_fork.cf_extents[0].startBlock;
2440 vhp->journalInfoBlock = SWAP_BE32(jib_fork.cf_extents[0].startBlock);
2441 recreate_journal = 1;
2442 }
2443
2444
2445 sectors_per_fsblock = SWAP_BE32(vhp->blockSize) / hfsmp->hfs_logical_block_size;
2446
2447 // Read journal info
2448 jinfo_bp = hfs_malloc(hfsmp->hfs_physical_block_size);
2449 if (!jinfo_bp) {
2450 LFHFS_LOG(LEVEL_ERROR, "hfs: can't alloc memory.\n");
2451 vcb->vcbAtrb &= ~kHFSVolumeJournaledMask;
2452 return 0;
2453 }
2454
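/*
 * Same conversion as in hfs_early_journal_init(): the embedded-volume
 * byte offset (hfsPlusIOPosOffset) in logical blocks, plus the journal
 * info allocation block converted to logical blocks.
 */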
2455 uint64_t u64JournalOffset =
2456 (vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size +
2457 ((u_int64_t)SWAP_BE32(vhp->journalInfoBlock)*sectors_per_fsblock));
2458
2459 retval = raw_readwrite_read_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size, jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL);
2460
2461 if (retval) {
2462 if (jinfo_bp) {
2463 hfs_free(jinfo_bp);
2464 }
2465 LFHFS_LOG(LEVEL_ERROR, "hfs: can't read journal info block. disabling journaling.\n");
2466 vcb->vcbAtrb &= ~kHFSVolumeJournaledMask;
2467 return 0;
2468 }
2469
2470 jibp = jinfo_bp;
2471 jib_flags = SWAP_BE32(jibp->flags);
2472 jib_offset = SWAP_BE64(jibp->offset);
2473 jib_size = SWAP_BE64(jibp->size);
2474
2475 fid = GetFileInfo(vcb, ".journal", &jattr, &jfork);
2476 if (fid == 0 || jfork.cf_extents[0].startBlock == 0 || jfork.cf_size == 0) {
2477 LFHFS_LOG(LEVEL_ERROR, "hfs: can't find the journal file! disabling journaling (start: %d)\n",
2478 fid ? jfork.cf_extents[0].startBlock : 0);
2479 hfs_free(jinfo_bp);
2480 vcb->vcbAtrb &= ~kHFSVolumeJournaledMask;
2481 return 0;
2482 }
2483 hfsmp->hfs_jnlfileid = fid;
2484
2485 // make sure the journal file begins where we think it should.
2486 if ((jib_flags & kJIJournalInFSMask) && (jib_offset / (u_int64_t)vcb->blockSize) != jfork.cf_extents[0].startBlock) {
2487 LFHFS_LOG(LEVEL_ERROR, "hfs: The journal file moved (was: %lld; is: %d). Fixing up\n",
2488 (jib_offset / (u_int64_t)vcb->blockSize), jfork.cf_extents[0].startBlock);
2489
2490 jib_offset = (u_int64_t)jfork.cf_extents[0].startBlock * (u_int64_t)vcb->blockSize;
2491 write_jibp = 1;
2492 recreate_journal = 1;
2493 }
2494
2495 // check the size of the journal file.
2496 if (jib_size != (u_int64_t)jfork.cf_extents[0].blockCount*vcb->blockSize) {
2497 LFHFS_LOG(LEVEL_ERROR, "hfs: The journal file changed size! (was %lld; is %lld). Fixing up.\n",
2498 jib_size, (u_int64_t)jfork.cf_extents[0].blockCount*vcb->blockSize);
2499
2500 jib_size = (u_int64_t)jfork.cf_extents[0].blockCount * vcb->blockSize;
2501 write_jibp = 1;
2502 recreate_journal = 1;
2503 }
2504
2505 if (!(jib_flags & kJIJournalInFSMask)) {
2506 LFHFS_LOG(LEVEL_ERROR, "hfs: No support for journal on a different volume\n");
2507 hfs_free(jinfo_bp);
2508 vcb->vcbAtrb &= ~kHFSVolumeJournaledMask;
2509 return 0;
2510 }
2511
2512 hfsmp->jvp = hfsmp->hfs_devvp;
2513 jib_offset += (off_t)vcb->hfsPlusIOPosOffset;
2514
2515 // save this off for the hack-y check in hfs_remove()
2516 hfsmp->jnl_start = jib_offset / SWAP_BE32(vhp->blockSize);
2517 hfsmp->jnl_size = jib_size;
2518
2519 if ((hfsmp->hfs_flags & HFS_READ_ONLY) && (hfsmp->hfs_mp->mnt_flag & MNT_ROOTFS) == 0) {
2520 // if the file system is read-only, check if the journal is empty.
2521 // if it is, then we can allow the mount. otherwise we have to
2522 // return failure.
2523 retval = journal_is_clean(hfsmp->jvp,
2524 jib_offset,
2525 jib_size,
2526 devvp,
2527 hfsmp->hfs_logical_block_size,
2528 hfsmp->hfs_mp);
2529
2530 hfsmp->jnl = NULL;
2531
2532 hfs_free(jinfo_bp);
2533
2534 if (retval) {
2535 LFHFS_LOG(LEVEL_ERROR, "hfs_late_journal_init: volume on is read-only and journal is dirty. Can not mount volume.\n");
2536 }
2537
2538 return retval;
2539 }
2540
2541 if ((jib_flags & kJIJournalNeedInitMask) || recreate_journal) {
2542 LFHFS_LOG(LEVEL_ERROR, "hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2543 jib_offset, jib_size);
2544 hfsmp->jnl = journal_create(hfsmp->jvp,
2545 jib_offset,
2546 jib_size,
2547 devvp,
2548 hfsmp->hfs_logical_block_size,
2549 arg_flags,
2550 arg_tbufsz,
2551 NULL,
2552 hfsmp->hfs_mp,
2553 hfsmp->hfs_mp);
2554
2555 // no need to start a transaction here... if this were to fail
2556 // we'd just re-init it on the next mount.
2557 jib_flags &= ~kJIJournalNeedInitMask;
2558 write_jibp = 1;
2559
2560 } else {
2561 //
2562 // if we weren't the last person to mount this volume
2563 // then we need to throw away the journal because it
2564 // is likely that someone else mucked with the disk.
2565 // if the journal is empty this is no big deal. if the
2566 // disk is dirty this prevents us from replaying the
2567 // journal over top of changes that someone else made.
2568 //
2569 arg_flags |= JOURNAL_RESET;
2570
2571 //printf("hfs: Opening the journal (joffset 0x%llx sz 0x%llx vhp_blksize %d)...\n",
2572 // jib_offset,
2573 // jib_size, SWAP_BE32(vhp->blockSize));
2574
2575 hfsmp->jnl = journal_open(hfsmp->jvp,
2576 jib_offset,
2577 jib_size,
2578 devvp,
2579 hfsmp->hfs_logical_block_size,
2580 arg_flags,
2581 arg_tbufsz,
2582 NULL,
2583 hfsmp->hfs_mp,
2584 hfsmp->hfs_mp);
2585 }
2586
2587
2588 if (write_jibp) {
2589 jibp->flags = SWAP_BE32(jib_flags);
2590 jibp->offset = SWAP_BE64(jib_offset);
2591 jibp->size = SWAP_BE64(jib_size);
2592
uint64_t uActualWrite = 0;
retval = raw_readwrite_write_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size, jinfo_bp, hfsmp->hfs_physical_block_size, &uActualWrite, NULL);
if (retval) {
    /* best-effort write-back; log and continue, since the journal itself is already set up */
    LFHFS_LOG(LEVEL_ERROR, "hfs: late jnl init: failed to write the journal info block (retval %d).\n", retval);
}
2595 }
2596
2597 if (jinfo_bp) {
2598 hfs_free(jinfo_bp);
2599 }
2600
2601 // if we expected the journal to be there and we couldn't
2602 // create it or open it then we have to bail out.
2603 if (hfsmp->jnl == NULL) {
2604 LFHFS_LOG(LEVEL_ERROR, "hfs: late jnl init: failed to open/create the journal (retval %d).\n", retval);
2605 return EINVAL;
2606 }
2607
2608 return 0;
2609 }
2610