2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* @(#)hfs_vfsutils.c 4.0
30 * (c) 1997-2002 Apple Computer, Inc. All Rights Reserved
32 * hfs_vfsutils.c -- Routines that go between the HFS layer and the VFS.
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
40 #include <sys/mount.h>
41 #include <sys/mount_internal.h>
43 #include <sys/buf_internal.h>
45 #include <sys/unistd.h>
46 #include <sys/utfconv.h>
47 #include <sys/kauth.h>
48 #include <sys/fcntl.h>
49 #include <sys/fsctl.h>
50 #include <sys/vnode_internal.h>
51 #include <kern/clock.h>
54 #include <libkern/OSAtomic.h>
56 /* for parsing boot-args */
57 #include <pexpert/pexpert.h>
60 #include <sys/cprotect.h>
64 #include "hfs_catalog.h"
66 #include "hfs_mount.h"
67 #include "hfs_endian.h"
68 #include "hfs_cnode.h"
69 #include "hfs_fsctl.h"
71 #include "hfscommon/headers/FileMgrInternal.h"
72 #include "hfscommon/headers/BTreesInternal.h"
73 #include "hfscommon/headers/HFSUnicodeWrappers.h"
75 /* Enable/disable debugging code for live volume resizing, defined in hfs_resize.c */
76 extern int hfs_resize_debug
;
78 static void ReleaseMetaFileVNode(struct vnode
*vp
);
79 static int hfs_late_journal_init(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
, void *_args
);
81 static u_int32_t
hfs_hotfile_freeblocks(struct hfsmount
*);
82 static void hfs_thaw_locked(struct hfsmount
*hfsmp
);
84 #define HFS_MOUNT_DEBUG 1
87 //*******************************************************************************
88 // Note: Finder information in the HFS/HFS+ metadata are considered opaque and
89 // hence are not in the right byte order on little endian machines. It is
90 // the responsibility of the finder and other clients to swap the data.
91 //*******************************************************************************
93 //*******************************************************************************
94 // Routine: hfs_MountHFSVolume
97 //*******************************************************************************
/*
 * Fixed names of the HFS / HFS+ metadata files.  These are installed into
 * cat_desc.cd_nameptr when the corresponding metadata vnodes are created
 * at mount time (see hfs_MountHFSVolume / hfs_MountHFSPlusVolume below).
 */
unsigned char hfs_catname[]     = "Catalog B-tree";
unsigned char hfs_extname[]     = "Extents B-tree";
unsigned char hfs_vbmname[]     = "Volume Bitmap";
unsigned char hfs_attrname[]    = "Attribute B-tree";
unsigned char hfs_startupname[] = "Startup File";
/*
 * hfs_MountHFSVolume
 *
 * Mount a classic (pre-HFS+) HFS volume: validate the on-disk Master
 * Directory Block (MDB), copy its big-endian fields into the in-memory
 * VCB, create and lock the metadata vnodes (extents B-tree, catalog
 * B-tree, and a dummy allocation-bitmap vnode used only for bitmap
 * locking), then mark the volume dirty and look up the root folder as a
 * catalog sanity check.
 *
 * Parameters:
 *   hfsmp - mount structure being initialized (its VCB is HFSTOVCB(hfsmp))
 *   mdb   - on-disk Master Directory Block, big-endian (hence SWAP_BE16/32)
 *   p     - unused process pointer
 *
 * Returns an OSErr; on failure the partially constructed metadata vnodes
 * are released via hfsUnmount() at the bottom of the function.
 *
 * NOTE(review): this block was recovered from a damaged extraction —
 * several original lines (error returns, braces, goto labels, and the
 * declarations of `error` and `utf8chars`) are missing, as is visible in
 * the gaps of the embedded numbering.  The control flow shown here is
 * therefore incomplete; compare against the upstream file before editing.
 */
105 OSErr
hfs_MountHFSVolume(struct hfsmount
*hfsmp
, HFSMasterDirectoryBlock
*mdb
,
106 __unused
struct proc
*p
)
108 ExtendedVCB
*vcb
= HFSTOVCB(hfsmp
);
111 struct cat_desc cndesc
;
112 struct cat_attr cnattr
;
113 struct cat_fork fork
;
114 int newvnode_flags
= 0;
/* Reject MDBs whose allocation block size is zero or not 512-aligned. */
116 /* Block size must be a multiple of 512 */
117 if (SWAP_BE32(mdb
->drAlBlkSiz
) == 0 ||
118 (SWAP_BE32(mdb
->drAlBlkSiz
) & 0x01FF) != 0)
/* NOTE(review): the failure return for the check above appears to have
 * been dropped by the extraction. */
121 /* don't mount a writeable volume if its dirty, it must be cleaned by fsck_hfs */
122 if (((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0) &&
123 ((SWAP_BE16(mdb
->drAtrb
) & kHFSVolumeUnmountedMask
) == 0)) {
/* Remember that this is a classic (standard) HFS volume. */
126 hfsmp
->hfs_flags
|= HFS_STANDARD
;
128 * The MDB seems OK: transfer info from it into VCB
129 * Note - the VCB starts out clear (all zeros)
/* Byte-swap each MDB field; date fields are local time on classic HFS,
 * so they pass through LocalToUTC() before to_bsd_time(). */
132 vcb
->vcbSigWord
= SWAP_BE16 (mdb
->drSigWord
);
133 vcb
->hfs_itime
= to_bsd_time(LocalToUTC(SWAP_BE32(mdb
->drCrDate
)));
134 vcb
->localCreateDate
= SWAP_BE32 (mdb
->drCrDate
);
135 vcb
->vcbLsMod
= to_bsd_time(LocalToUTC(SWAP_BE32(mdb
->drLsMod
)));
136 vcb
->vcbAtrb
= SWAP_BE16 (mdb
->drAtrb
);
137 vcb
->vcbNmFls
= SWAP_BE16 (mdb
->drNmFls
);
138 vcb
->vcbVBMSt
= SWAP_BE16 (mdb
->drVBMSt
);
139 vcb
->nextAllocation
= SWAP_BE16 (mdb
->drAllocPtr
);
140 vcb
->totalBlocks
= SWAP_BE16 (mdb
->drNmAlBlks
);
141 vcb
->allocLimit
= vcb
->totalBlocks
;
142 vcb
->blockSize
= SWAP_BE32 (mdb
->drAlBlkSiz
);
143 vcb
->vcbClpSiz
= SWAP_BE32 (mdb
->drClpSiz
);
144 vcb
->vcbAlBlSt
= SWAP_BE16 (mdb
->drAlBlSt
);
145 vcb
->vcbNxtCNID
= SWAP_BE32 (mdb
->drNxtCNID
);
146 vcb
->freeBlocks
= SWAP_BE16 (mdb
->drFreeBks
);
147 vcb
->vcbVolBkUp
= to_bsd_time(LocalToUTC(SWAP_BE32(mdb
->drVolBkUp
)));
148 vcb
->vcbWrCnt
= SWAP_BE32 (mdb
->drWrCnt
);
149 vcb
->vcbNmRtDirs
= SWAP_BE16 (mdb
->drNmRtDirs
);
150 vcb
->vcbFilCnt
= SWAP_BE32 (mdb
->drFilCnt
);
151 vcb
->vcbDirCnt
= SWAP_BE32 (mdb
->drDirCnt
);
/* Finder info is opaque: copied raw, never byte-swapped (see banner above). */
152 bcopy(mdb
->drFndrInfo
, vcb
->vcbFndrInfo
, sizeof(vcb
->vcbFndrInfo
));
153 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0)
154 vcb
->vcbWrCnt
++; /* Compensate for write of MDB on last flush */
/* NOTE(review): `error` and `utf8chars` are used below but their
 * declarations are not visible in this extraction. */
156 /* convert hfs encoded name into UTF-8 string */
157 error
= hfs_to_utf8(vcb
, mdb
->drVN
, NAME_MAX
, &utf8chars
, vcb
->vcbVN
);
159 * When an HFS name cannot be encoded with the current
160 * volume encoding we use MacRoman as a fallback.
162 if (error
|| (utf8chars
== 0)) {
163 error
= mac_roman_to_utf8(mdb
->drVN
, NAME_MAX
, &utf8chars
, vcb
->vcbVN
);
164 /* If we fail to encode to UTF8 from Mac Roman, the name is bad. Deny the mount */
/* Derive the buffer-cache logical block size and the bitmap I/O size. */
170 hfsmp
->hfs_logBlockSize
= BestBlockSizeFit(vcb
->blockSize
, MAXBSIZE
, hfsmp
->hfs_logical_block_size
);
171 vcb
->vcbVBMIOSize
= kHFSBlockSize
;
173 /* Generate the partition-based AVH location */
174 hfsmp
->hfs_partition_avh_sector
= HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
,
175 hfsmp
->hfs_logical_block_count
);
177 /* HFS standard is read-only, so just stuff the FS location in here, too */
178 hfsmp
->hfs_fs_avh_sector
= hfsmp
->hfs_partition_avh_sector
;
/* Shared descriptor/attribute templates reused for each metadata vnode. */
180 bzero(&cndesc
, sizeof(cndesc
));
181 cndesc
.cd_parentcnid
= kHFSRootParentID
;
182 cndesc
.cd_flags
|= CD_ISMETA
;
183 bzero(&cnattr
, sizeof(cnattr
));
184 cnattr
.ca_linkcount
= 1;
185 cnattr
.ca_mode
= S_IFREG
;
186 bzero(&fork
, sizeof(fork
));
189 * Set up Extents B-tree vnode
191 cndesc
.cd_nameptr
= hfs_extname
;
192 cndesc
.cd_namelen
= strlen((char *)hfs_extname
);
193 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSExtentsFileID
;
194 fork
.cf_size
= SWAP_BE32(mdb
->drXTFlSize
);
195 fork
.cf_blocks
= fork
.cf_size
/ vcb
->blockSize
;
196 fork
.cf_clump
= SWAP_BE32(mdb
->drXTClpSiz
);
/* Classic HFS forks carry only the three MDB-resident extents. */
198 fork
.cf_extents
[0].startBlock
= SWAP_BE16(mdb
->drXTExtRec
[0].startBlock
);
199 fork
.cf_extents
[0].blockCount
= SWAP_BE16(mdb
->drXTExtRec
[0].blockCount
);
200 fork
.cf_extents
[1].startBlock
= SWAP_BE16(mdb
->drXTExtRec
[1].startBlock
);
201 fork
.cf_extents
[1].blockCount
= SWAP_BE16(mdb
->drXTExtRec
[1].blockCount
);
202 fork
.cf_extents
[2].startBlock
= SWAP_BE16(mdb
->drXTExtRec
[2].startBlock
);
203 fork
.cf_extents
[2].blockCount
= SWAP_BE16(mdb
->drXTExtRec
[2].blockCount
);
204 cnattr
.ca_blocks
= fork
.cf_blocks
;
206 error
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &fork
,
207 &hfsmp
->hfs_extents_vp
, &newvnode_flags
);
/* NOTE(review): the error-branch framing and exit path around the debug
 * print below appear truncated by the extraction. */
209 if (HFS_MOUNT_DEBUG
) {
210 printf("hfs_mounthfs (std): error creating Ext Vnode (%d) \n", error
);
214 error
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_extents_vp
),
215 (KeyCompareProcPtr
)CompareExtentKeys
));
217 if (HFS_MOUNT_DEBUG
) {
218 printf("hfs_mounthfs (std): error opening Ext Vnode (%d) \n", error
);
220 hfs_unlock(VTOC(hfsmp
->hfs_extents_vp
));
223 hfsmp
->hfs_extents_cp
= VTOC(hfsmp
->hfs_extents_vp
);
226 * Set up Catalog B-tree vnode...
228 cndesc
.cd_nameptr
= hfs_catname
;
229 cndesc
.cd_namelen
= strlen((char *)hfs_catname
);
230 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSCatalogFileID
;
231 fork
.cf_size
= SWAP_BE32(mdb
->drCTFlSize
);
232 fork
.cf_blocks
= fork
.cf_size
/ vcb
->blockSize
;
233 fork
.cf_clump
= SWAP_BE32(mdb
->drCTClpSiz
);
235 fork
.cf_extents
[0].startBlock
= SWAP_BE16(mdb
->drCTExtRec
[0].startBlock
);
236 fork
.cf_extents
[0].blockCount
= SWAP_BE16(mdb
->drCTExtRec
[0].blockCount
);
237 fork
.cf_extents
[1].startBlock
= SWAP_BE16(mdb
->drCTExtRec
[1].startBlock
);
238 fork
.cf_extents
[1].blockCount
= SWAP_BE16(mdb
->drCTExtRec
[1].blockCount
);
239 fork
.cf_extents
[2].startBlock
= SWAP_BE16(mdb
->drCTExtRec
[2].startBlock
);
240 fork
.cf_extents
[2].blockCount
= SWAP_BE16(mdb
->drCTExtRec
[2].blockCount
);
241 cnattr
.ca_blocks
= fork
.cf_blocks
;
243 error
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &fork
,
244 &hfsmp
->hfs_catalog_vp
, &newvnode_flags
);
/* On catalog failures the already-created extents (and later catalog)
 * cnodes are unlocked before bailing out. */
246 if (HFS_MOUNT_DEBUG
) {
247 printf("hfs_mounthfs (std): error creating catalog Vnode (%d) \n", error
);
249 hfs_unlock(VTOC(hfsmp
->hfs_extents_vp
));
252 error
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_catalog_vp
),
253 (KeyCompareProcPtr
)CompareCatalogKeys
));
255 if (HFS_MOUNT_DEBUG
) {
256 printf("hfs_mounthfs (std): error opening catalog Vnode (%d) \n", error
);
258 hfs_unlock(VTOC(hfsmp
->hfs_catalog_vp
));
259 hfs_unlock(VTOC(hfsmp
->hfs_extents_vp
));
262 hfsmp
->hfs_catalog_cp
= VTOC(hfsmp
->hfs_catalog_vp
);
265 * Set up dummy Allocation file vnode (used only for locking bitmap)
267 cndesc
.cd_nameptr
= hfs_vbmname
;
268 cndesc
.cd_namelen
= strlen((char *)hfs_vbmname
);
269 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSAllocationFileID
;
/* The dummy bitmap vnode has no backing fork: zero size and blocks. */
270 bzero(&fork
, sizeof(fork
));
271 cnattr
.ca_blocks
= 0;
273 error
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &fork
,
274 &hfsmp
->hfs_allocation_vp
, &newvnode_flags
);
276 if (HFS_MOUNT_DEBUG
) {
277 printf("hfs_mounthfs (std): error creating bitmap Vnode (%d) \n", error
);
279 hfs_unlock(VTOC(hfsmp
->hfs_catalog_vp
));
280 hfs_unlock(VTOC(hfsmp
->hfs_extents_vp
));
283 hfsmp
->hfs_allocation_cp
= VTOC(hfsmp
->hfs_allocation_vp
);
285 /* mark the volume dirty (clear clean unmount bit) */
286 vcb
->vcbAtrb
&= ~kHFSVolumeUnmountedMask
;
/* Sanity check: the root folder must be resolvable through the catalog. */
288 if (error
== noErr
) {
289 error
= cat_idlookup(hfsmp
, kHFSRootFolderID
, 0, 0, NULL
, NULL
, NULL
);
290 if (HFS_MOUNT_DEBUG
) {
291 printf("hfs_mounthfs (std): error looking up root folder (%d) \n", error
);
295 if (error
== noErr
) {
296 /* If the disk isn't write protected.. */
297 if ( !(vcb
->vcbAtrb
& kHFSVolumeHardwareLockMask
)) {
298 MarkVCBDirty (vcb
); // mark VCB dirty so it will be written
303 * all done with system files so we can unlock now...
305 hfs_unlock(VTOC(hfsmp
->hfs_allocation_vp
));
306 hfs_unlock(VTOC(hfsmp
->hfs_catalog_vp
));
307 hfs_unlock(VTOC(hfsmp
->hfs_extents_vp
));
309 if (error
== noErr
) {
310 /* If successful, then we can just return once we've unlocked the cnodes */
314 //-- Release any resources allocated so far before exiting with an error:
316 hfsUnmount(hfsmp
, NULL
);
323 //*******************************************************************************
324 // Routine: hfs_MountHFSPlusVolume
327 //*******************************************************************************
329 OSErr
hfs_MountHFSPlusVolume(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
,
330 off_t embeddedOffset
, u_int64_t disksize
, __unused
struct proc
*p
, void *args
, kauth_cred_t cred
)
332 register ExtendedVCB
*vcb
;
333 struct cat_desc cndesc
;
334 struct cat_attr cnattr
;
335 struct cat_fork cfork
;
337 daddr64_t spare_sectors
;
338 struct BTreeInfoRec btinfo
;
340 u_int16_t hfs_version
;
341 int newvnode_flags
= 0;
344 char converted_volname
[256];
345 size_t volname_length
= 0;
346 size_t conv_volname_length
= 0;
348 signature
= SWAP_BE16(vhp
->signature
);
349 hfs_version
= SWAP_BE16(vhp
->version
);
351 if (signature
== kHFSPlusSigWord
) {
352 if (hfs_version
!= kHFSPlusVersion
) {
353 printf("hfs_mount: invalid HFS+ version: %x\n", hfs_version
);
356 } else if (signature
== kHFSXSigWord
) {
357 if (hfs_version
!= kHFSXVersion
) {
358 printf("hfs_mount: invalid HFSX version: %x\n", hfs_version
);
361 /* The in-memory signature is always 'H+'. */
362 signature
= kHFSPlusSigWord
;
363 hfsmp
->hfs_flags
|= HFS_X
;
365 /* Removed printf for invalid HFS+ signature because it gives
366 * false error for UFS root volume
368 if (HFS_MOUNT_DEBUG
) {
369 printf("hfs_mounthfsplus: unknown Volume Signature : %x\n", signature
);
374 /* Block size must be at least 512 and a power of 2 */
375 blockSize
= SWAP_BE32(vhp
->blockSize
);
376 if (blockSize
< 512 || !powerof2(blockSize
)) {
377 if (HFS_MOUNT_DEBUG
) {
378 printf("hfs_mounthfsplus: invalid blocksize (%d) \n", blockSize
);
383 /* don't mount a writable volume if its dirty, it must be cleaned by fsck_hfs */
384 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0 && hfsmp
->jnl
== NULL
&&
385 (SWAP_BE32(vhp
->attributes
) & kHFSVolumeUnmountedMask
) == 0) {
386 if (HFS_MOUNT_DEBUG
) {
387 printf("hfs_mounthfsplus: cannot mount dirty non-journaled volumes\n");
392 /* Make sure we can live with the physical block size. */
393 if ((disksize
& (hfsmp
->hfs_logical_block_size
- 1)) ||
394 (embeddedOffset
& (hfsmp
->hfs_logical_block_size
- 1)) ||
395 (blockSize
< hfsmp
->hfs_logical_block_size
)) {
396 if (HFS_MOUNT_DEBUG
) {
397 printf("hfs_mounthfsplus: invalid physical blocksize (%d), hfs_logical_blocksize (%d) \n",
398 blockSize
, hfsmp
->hfs_logical_block_size
);
403 /* If allocation block size is less than the physical
404 * block size, we assume that the physical block size
405 * is same as logical block size. The physical block
406 * size value is used to round down the offsets for
407 * reading and writing the primary and alternate volume
408 * headers at physical block boundary and will cause
409 * problems if it is less than the block size.
411 if (blockSize
< hfsmp
->hfs_physical_block_size
) {
412 hfsmp
->hfs_physical_block_size
= hfsmp
->hfs_logical_block_size
;
413 hfsmp
->hfs_log_per_phys
= 1;
417 * The VolumeHeader seems OK: transfer info from it into VCB
418 * Note - the VCB starts out clear (all zeros)
420 vcb
= HFSTOVCB(hfsmp
);
422 vcb
->vcbSigWord
= signature
;
423 vcb
->vcbJinfoBlock
= SWAP_BE32(vhp
->journalInfoBlock
);
424 vcb
->vcbLsMod
= to_bsd_time(SWAP_BE32(vhp
->modifyDate
));
425 vcb
->vcbAtrb
= SWAP_BE32(vhp
->attributes
);
426 vcb
->vcbClpSiz
= SWAP_BE32(vhp
->rsrcClumpSize
);
427 vcb
->vcbNxtCNID
= SWAP_BE32(vhp
->nextCatalogID
);
428 vcb
->vcbVolBkUp
= to_bsd_time(SWAP_BE32(vhp
->backupDate
));
429 vcb
->vcbWrCnt
= SWAP_BE32(vhp
->writeCount
);
430 vcb
->vcbFilCnt
= SWAP_BE32(vhp
->fileCount
);
431 vcb
->vcbDirCnt
= SWAP_BE32(vhp
->folderCount
);
433 /* copy 32 bytes of Finder info */
434 bcopy(vhp
->finderInfo
, vcb
->vcbFndrInfo
, sizeof(vhp
->finderInfo
));
436 vcb
->vcbAlBlSt
= 0; /* hfs+ allocation blocks start at first block of volume */
437 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0)
438 vcb
->vcbWrCnt
++; /* compensate for write of Volume Header on last flush */
440 /* Now fill in the Extended VCB info */
441 vcb
->nextAllocation
= SWAP_BE32(vhp
->nextAllocation
);
442 vcb
->totalBlocks
= SWAP_BE32(vhp
->totalBlocks
);
443 vcb
->allocLimit
= vcb
->totalBlocks
;
444 vcb
->freeBlocks
= SWAP_BE32(vhp
->freeBlocks
);
445 vcb
->blockSize
= blockSize
;
446 vcb
->encodingsBitmap
= SWAP_BE64(vhp
->encodingsBitmap
);
447 vcb
->localCreateDate
= SWAP_BE32(vhp
->createDate
);
449 vcb
->hfsPlusIOPosOffset
= embeddedOffset
;
451 /* Default to no free block reserve */
452 vcb
->reserveBlocks
= 0;
455 * Update the logical block size in the mount struct
456 * (currently set up from the wrapper MDB) using the
457 * new blocksize value:
459 hfsmp
->hfs_logBlockSize
= BestBlockSizeFit(vcb
->blockSize
, MAXBSIZE
, hfsmp
->hfs_logical_block_size
);
460 vcb
->vcbVBMIOSize
= min(vcb
->blockSize
, MAXPHYSIO
);
463 * Validate and initialize the location of the alternate volume header.
465 * Note that there may be spare sectors beyond the end of the filesystem that still
466 * belong to our partition.
469 spare_sectors
= hfsmp
->hfs_logical_block_count
-
470 (((daddr64_t
)vcb
->totalBlocks
* blockSize
) /
471 hfsmp
->hfs_logical_block_size
);
474 * Differentiate between "innocuous" spare sectors and the more unusual
477 * *** Innocuous spare sectors exist if:
479 * A) the number of bytes assigned to the partition (by multiplying logical
480 * block size * logical block count) is greater than the filesystem size
481 * (by multiplying allocation block count and allocation block size)
485 * B) the remainder is less than the size of a full allocation block's worth of bytes.
487 * This handles the normal case where there may be a few extra sectors, but the two
488 * are fundamentally in sync.
490 * *** Degenerate spare sectors exist if:
491 * A) The number of bytes assigned to the partition (by multiplying logical
492 * block size * logical block count) is greater than the filesystem size
493 * (by multiplying allocation block count and block size).
497 * B) the remainder is greater than a full allocation's block worth of bytes.
498 * In this case, a smaller file system exists in a larger partition.
499 * This can happen in various ways, including when volume is resized but the
500 * partition is yet to be resized. Under this condition, we have to assume that
501 * a partition management software may resize the partition to match
502 * the file system size in the future. Therefore we should update
503 * alternate volume header at two locations on the disk,
504 * a. 1024 bytes before end of the partition
505 * b. 1024 bytes before end of the file system
508 if (spare_sectors
> (daddr64_t
)(blockSize
/ hfsmp
->hfs_logical_block_size
)) {
510 * Handle the degenerate case above. FS < partition size.
511 * AVH located at 1024 bytes from the end of the partition
513 hfsmp
->hfs_partition_avh_sector
= (hfsmp
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
514 HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
, hfsmp
->hfs_logical_block_count
);
516 /* AVH located at 1024 bytes from the end of the filesystem */
517 hfsmp
->hfs_fs_avh_sector
= (hfsmp
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
518 HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
,
519 (((daddr64_t
)vcb
->totalBlocks
* blockSize
) / hfsmp
->hfs_logical_block_size
));
522 /* Innocuous spare sectors; Partition & FS notion are in sync */
523 hfsmp
->hfs_partition_avh_sector
= (hfsmp
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
524 HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
, hfsmp
->hfs_logical_block_count
);
526 hfsmp
->hfs_fs_avh_sector
= hfsmp
->hfs_partition_avh_sector
;
528 if (hfs_resize_debug
) {
529 printf ("hfs_MountHFSPlusVolume: partition_avh_sector=%qu, fs_avh_sector=%qu\n",
530 hfsmp
->hfs_partition_avh_sector
, hfsmp
->hfs_fs_avh_sector
);
533 bzero(&cndesc
, sizeof(cndesc
));
534 cndesc
.cd_parentcnid
= kHFSRootParentID
;
535 cndesc
.cd_flags
|= CD_ISMETA
;
536 bzero(&cnattr
, sizeof(cnattr
));
537 cnattr
.ca_linkcount
= 1;
538 cnattr
.ca_mode
= S_IFREG
;
541 * Set up Extents B-tree vnode
543 cndesc
.cd_nameptr
= hfs_extname
;
544 cndesc
.cd_namelen
= strlen((char *)hfs_extname
);
545 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSExtentsFileID
;
547 cfork
.cf_size
= SWAP_BE64 (vhp
->extentsFile
.logicalSize
);
548 cfork
.cf_new_size
= 0;
549 cfork
.cf_clump
= SWAP_BE32 (vhp
->extentsFile
.clumpSize
);
550 cfork
.cf_blocks
= SWAP_BE32 (vhp
->extentsFile
.totalBlocks
);
551 cfork
.cf_vblocks
= 0;
552 cnattr
.ca_blocks
= cfork
.cf_blocks
;
553 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
554 cfork
.cf_extents
[i
].startBlock
=
555 SWAP_BE32 (vhp
->extentsFile
.extents
[i
].startBlock
);
556 cfork
.cf_extents
[i
].blockCount
=
557 SWAP_BE32 (vhp
->extentsFile
.extents
[i
].blockCount
);
559 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
,
560 &hfsmp
->hfs_extents_vp
, &newvnode_flags
);
563 if (HFS_MOUNT_DEBUG
) {
564 printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting extentoverflow BT\n", retval
);
568 hfsmp
->hfs_extents_cp
= VTOC(hfsmp
->hfs_extents_vp
);
569 hfs_unlock(hfsmp
->hfs_extents_cp
);
571 retval
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_extents_vp
),
572 (KeyCompareProcPtr
) CompareExtentKeysPlus
));
575 if (HFS_MOUNT_DEBUG
) {
576 printf("hfs_mounthfsplus: BTOpenPath returned (%d) getting extentoverflow BT\n", retval
);
581 * Set up Catalog B-tree vnode
583 cndesc
.cd_nameptr
= hfs_catname
;
584 cndesc
.cd_namelen
= strlen((char *)hfs_catname
);
585 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSCatalogFileID
;
587 cfork
.cf_size
= SWAP_BE64 (vhp
->catalogFile
.logicalSize
);
588 cfork
.cf_clump
= SWAP_BE32 (vhp
->catalogFile
.clumpSize
);
589 cfork
.cf_blocks
= SWAP_BE32 (vhp
->catalogFile
.totalBlocks
);
590 cfork
.cf_vblocks
= 0;
591 cnattr
.ca_blocks
= cfork
.cf_blocks
;
592 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
593 cfork
.cf_extents
[i
].startBlock
=
594 SWAP_BE32 (vhp
->catalogFile
.extents
[i
].startBlock
);
595 cfork
.cf_extents
[i
].blockCount
=
596 SWAP_BE32 (vhp
->catalogFile
.extents
[i
].blockCount
);
598 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
,
599 &hfsmp
->hfs_catalog_vp
, &newvnode_flags
);
601 if (HFS_MOUNT_DEBUG
) {
602 printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting catalog BT\n", retval
);
606 hfsmp
->hfs_catalog_cp
= VTOC(hfsmp
->hfs_catalog_vp
);
607 hfs_unlock(hfsmp
->hfs_catalog_cp
);
609 retval
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_catalog_vp
),
610 (KeyCompareProcPtr
) CompareExtendedCatalogKeys
));
612 if (HFS_MOUNT_DEBUG
) {
613 printf("hfs_mounthfsplus: BTOpenPath returned (%d) getting catalog BT\n", retval
);
617 if ((hfsmp
->hfs_flags
& HFS_X
) &&
618 BTGetInformation(VTOF(hfsmp
->hfs_catalog_vp
), 0, &btinfo
) == 0) {
619 if (btinfo
.keyCompareType
== kHFSBinaryCompare
) {
620 hfsmp
->hfs_flags
|= HFS_CASE_SENSITIVE
;
621 /* Install a case-sensitive key compare */
622 (void) BTOpenPath(VTOF(hfsmp
->hfs_catalog_vp
),
623 (KeyCompareProcPtr
)cat_binarykeycompare
);
628 * Set up Allocation file vnode
630 cndesc
.cd_nameptr
= hfs_vbmname
;
631 cndesc
.cd_namelen
= strlen((char *)hfs_vbmname
);
632 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSAllocationFileID
;
634 cfork
.cf_size
= SWAP_BE64 (vhp
->allocationFile
.logicalSize
);
635 cfork
.cf_clump
= SWAP_BE32 (vhp
->allocationFile
.clumpSize
);
636 cfork
.cf_blocks
= SWAP_BE32 (vhp
->allocationFile
.totalBlocks
);
637 cfork
.cf_vblocks
= 0;
638 cnattr
.ca_blocks
= cfork
.cf_blocks
;
639 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
640 cfork
.cf_extents
[i
].startBlock
=
641 SWAP_BE32 (vhp
->allocationFile
.extents
[i
].startBlock
);
642 cfork
.cf_extents
[i
].blockCount
=
643 SWAP_BE32 (vhp
->allocationFile
.extents
[i
].blockCount
);
645 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
,
646 &hfsmp
->hfs_allocation_vp
, &newvnode_flags
);
648 if (HFS_MOUNT_DEBUG
) {
649 printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting bitmap\n", retval
);
653 hfsmp
->hfs_allocation_cp
= VTOC(hfsmp
->hfs_allocation_vp
);
654 hfs_unlock(hfsmp
->hfs_allocation_cp
);
657 * Set up Attribute B-tree vnode
659 if (vhp
->attributesFile
.totalBlocks
!= 0) {
660 cndesc
.cd_nameptr
= hfs_attrname
;
661 cndesc
.cd_namelen
= strlen((char *)hfs_attrname
);
662 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSAttributesFileID
;
664 cfork
.cf_size
= SWAP_BE64 (vhp
->attributesFile
.logicalSize
);
665 cfork
.cf_clump
= SWAP_BE32 (vhp
->attributesFile
.clumpSize
);
666 cfork
.cf_blocks
= SWAP_BE32 (vhp
->attributesFile
.totalBlocks
);
667 cfork
.cf_vblocks
= 0;
668 cnattr
.ca_blocks
= cfork
.cf_blocks
;
669 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
670 cfork
.cf_extents
[i
].startBlock
=
671 SWAP_BE32 (vhp
->attributesFile
.extents
[i
].startBlock
);
672 cfork
.cf_extents
[i
].blockCount
=
673 SWAP_BE32 (vhp
->attributesFile
.extents
[i
].blockCount
);
675 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
,
676 &hfsmp
->hfs_attribute_vp
, &newvnode_flags
);
678 if (HFS_MOUNT_DEBUG
) {
679 printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting EA BT\n", retval
);
683 hfsmp
->hfs_attribute_cp
= VTOC(hfsmp
->hfs_attribute_vp
);
684 hfs_unlock(hfsmp
->hfs_attribute_cp
);
685 retval
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_attribute_vp
),
686 (KeyCompareProcPtr
) hfs_attrkeycompare
));
688 if (HFS_MOUNT_DEBUG
) {
689 printf("hfs_mounthfsplus: BTOpenPath returned (%d) getting EA BT\n", retval
);
694 /* Initialize vnode for virtual attribute data file that spans the
695 * entire file system space for performing I/O to attribute btree
696 * We hold iocount on the attrdata vnode for the entire duration
697 * of mount (similar to btree vnodes)
699 retval
= init_attrdata_vnode(hfsmp
);
701 if (HFS_MOUNT_DEBUG
) {
702 printf("hfs_mounthfsplus: init_attrdata_vnode returned (%d) for virtual EA file\n", retval
);
709 * Set up Startup file vnode
711 if (vhp
->startupFile
.totalBlocks
!= 0) {
712 cndesc
.cd_nameptr
= hfs_startupname
;
713 cndesc
.cd_namelen
= strlen((char *)hfs_startupname
);
714 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSStartupFileID
;
716 cfork
.cf_size
= SWAP_BE64 (vhp
->startupFile
.logicalSize
);
717 cfork
.cf_clump
= SWAP_BE32 (vhp
->startupFile
.clumpSize
);
718 cfork
.cf_blocks
= SWAP_BE32 (vhp
->startupFile
.totalBlocks
);
719 cfork
.cf_vblocks
= 0;
720 cnattr
.ca_blocks
= cfork
.cf_blocks
;
721 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
722 cfork
.cf_extents
[i
].startBlock
=
723 SWAP_BE32 (vhp
->startupFile
.extents
[i
].startBlock
);
724 cfork
.cf_extents
[i
].blockCount
=
725 SWAP_BE32 (vhp
->startupFile
.extents
[i
].blockCount
);
727 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
,
728 &hfsmp
->hfs_startup_vp
, &newvnode_flags
);
730 if (HFS_MOUNT_DEBUG
) {
731 printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting startup file\n", retval
);
735 hfsmp
->hfs_startup_cp
= VTOC(hfsmp
->hfs_startup_vp
);
736 hfs_unlock(hfsmp
->hfs_startup_cp
);
740 * Pick up volume name and create date
742 * Acquiring the volume name should not manipulate the bitmap, only the catalog
743 * btree and possibly the extents overflow b-tree.
745 retval
= cat_idlookup(hfsmp
, kHFSRootFolderID
, 0, 0, &cndesc
, &cnattr
, NULL
);
747 if (HFS_MOUNT_DEBUG
) {
748 printf("hfs_mounthfsplus: cat_idlookup returned (%d) getting rootfolder \n", retval
);
752 vcb
->hfs_itime
= cnattr
.ca_itime
;
753 vcb
->volumeNameEncodingHint
= cndesc
.cd_encoding
;
754 bcopy(cndesc
.cd_nameptr
, vcb
->vcbVN
, min(255, cndesc
.cd_namelen
));
755 volname_length
= strlen ((const char*)vcb
->vcbVN
);
756 cat_releasedesc(&cndesc
);
758 #define DKIOCCSSETLVNAME _IOW('d', 198, char[256])
761 /* Send the volume name down to CoreStorage if necessary */
762 retval
= utf8_normalizestr(vcb
->vcbVN
, volname_length
, (u_int8_t
*)converted_volname
, &conv_volname_length
, 256, UTF_PRECOMPOSED
);
764 (void) VNOP_IOCTL (hfsmp
->hfs_devvp
, DKIOCCSSETLVNAME
, converted_volname
, 0, vfs_context_current());
767 /* reset retval == 0. we don't care about errors in volname conversion */
772 * We now always initiate a full bitmap scan even if the volume is read-only because this is
773 * our only shot to do I/Os of dramaticallly different sizes than what the buffer cache ordinarily
774 * expects. TRIMs will not be delivered to the underlying media if the volume is not
777 thread_t allocator_scanner
;
780 /* Take the HFS mount mutex and wait on scan_var */
781 hfs_lock_mount (hfsmp
);
783 kernel_thread_start ((thread_continue_t
) hfs_scan_blocks
, hfsmp
, &allocator_scanner
);
784 /* Wait until it registers that it's got the appropriate locks */
785 while ((hfsmp
->scan_var
& HFS_ALLOCATOR_SCAN_INFLIGHT
) == 0) {
786 (void) msleep (&hfsmp
->scan_var
, &hfsmp
->hfs_mutex
, (PDROP
| PINOD
), "hfs_scan_blocks", 0);
787 if (hfsmp
->scan_var
& HFS_ALLOCATOR_SCAN_INFLIGHT
) {
791 hfs_lock_mount (hfsmp
);
795 thread_deallocate (allocator_scanner
);
797 /* mark the volume dirty (clear clean unmount bit) */
798 vcb
->vcbAtrb
&= ~kHFSVolumeUnmountedMask
;
799 if (hfsmp
->jnl
&& (hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0) {
800 hfs_flushvolumeheader(hfsmp
, TRUE
, 0);
803 /* kHFSHasFolderCount is only supported/updated on HFSX volumes */
804 if ((hfsmp
->hfs_flags
& HFS_X
) != 0) {
805 hfsmp
->hfs_flags
|= HFS_FOLDERCOUNT
;
809 // Check if we need to do late journal initialization. This only
810 // happens if a previous version of MacOS X (or 9) touched the disk.
811 // In that case hfs_late_journal_init() will go re-locate the journal
812 // and journal_info_block files and validate that they're still kosher.
814 if ( (vcb
->vcbAtrb
& kHFSVolumeJournaledMask
)
815 && (SWAP_BE32(vhp
->lastMountedVersion
) != kHFSJMountVersion
)
816 && (hfsmp
->jnl
== NULL
)) {
818 retval
= hfs_late_journal_init(hfsmp
, vhp
, args
);
820 if (retval
== EROFS
) {
821 // EROFS is a special error code that means the volume has an external
822 // journal which we couldn't find. in that case we do not want to
823 // rewrite the volume header - we'll just refuse to mount the volume.
824 if (HFS_MOUNT_DEBUG
) {
825 printf("hfs_mounthfsplus: hfs_late_journal_init returned (%d), maybe an external jnl?\n", retval
);
833 // if the journal failed to open, then set the lastMountedVersion
834 // to be "FSK!" which fsck_hfs will see and force the fsck instead
835 // of just bailing out because the volume is journaled.
836 if (!(hfsmp
->hfs_flags
& HFS_READ_ONLY
)) {
837 HFSPlusVolumeHeader
*jvhp
;
838 daddr64_t mdb_offset
;
839 struct buf
*bp
= NULL
;
841 hfsmp
->hfs_flags
|= HFS_NEED_JNL_RESET
;
843 mdb_offset
= (daddr64_t
)((embeddedOffset
/ blockSize
) + HFS_PRI_SECTOR(blockSize
));
846 retval
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
847 HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
),
848 hfsmp
->hfs_physical_block_size
, cred
, &bp
);
850 jvhp
= (HFSPlusVolumeHeader
*)(buf_dataptr(bp
) + HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
));
852 if (SWAP_BE16(jvhp
->signature
) == kHFSPlusSigWord
|| SWAP_BE16(jvhp
->signature
) == kHFSXSigWord
) {
853 printf ("hfs(3): Journal replay fail. Writing lastMountVersion as FSK!\n");
854 jvhp
->lastMountedVersion
= SWAP_BE32(kFSKMountVersion
);
862 // clear this so the error exit path won't try to use it
867 if (HFS_MOUNT_DEBUG
) {
868 printf("hfs_mounthfsplus: hfs_late_journal_init returned (%d)\n", retval
);
872 } else if (hfsmp
->jnl
) {
873 vfs_setflags(hfsmp
->hfs_mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
875 } else if (hfsmp
->jnl
|| ((vcb
->vcbAtrb
& kHFSVolumeJournaledMask
) && (hfsmp
->hfs_flags
& HFS_READ_ONLY
))) {
876 struct cat_attr jinfo_attr
, jnl_attr
;
878 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
879 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
882 // if we're here we need to fill in the fileid's for the
883 // journal and journal_info_block.
884 hfsmp
->hfs_jnlinfoblkid
= GetFileInfo(vcb
, kRootDirID
, ".journal_info_block", &jinfo_attr
, NULL
);
885 hfsmp
->hfs_jnlfileid
= GetFileInfo(vcb
, kRootDirID
, ".journal", &jnl_attr
, NULL
);
886 if (hfsmp
->hfs_jnlinfoblkid
== 0 || hfsmp
->hfs_jnlfileid
== 0) {
887 printf("hfs: danger! couldn't find the file-id's for the journal or journal_info_block\n");
888 printf("hfs: jnlfileid %d, jnlinfoblkid %d\n", hfsmp
->hfs_jnlfileid
, hfsmp
->hfs_jnlinfoblkid
);
891 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
892 vcb
->vcbAtrb
|= kHFSVolumeJournaledMask
;
895 if (hfsmp
->jnl
== NULL
) {
896 vfs_clearflags(hfsmp
->hfs_mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
900 if ( !(vcb
->vcbAtrb
& kHFSVolumeHardwareLockMask
) ) // if the disk is not write protected
902 MarkVCBDirty( vcb
); // mark VCB dirty so it will be written
906 * Distinguish 3 potential cases involving content protection:
907 * 1. mount point bit set; vcbAtrb does not support it. Fail.
908 * 2. mount point bit set; vcbattrb supports it. we're good.
909 * 3. mount point bit not set; vcbatrb supports it, turn bit on, then good.
911 if (vfs_flags(hfsmp
->hfs_mp
) & MNT_CPROTECT
) {
912 /* Does the mount point support it ? */
913 if ((vcb
->vcbAtrb
& kHFSContentProtectionMask
) == 0) {
920 /* not requested in the mount point. Is it in FS? */
921 if (vcb
->vcbAtrb
& kHFSContentProtectionMask
) {
923 vfs_setflags (hfsmp
->hfs_mp
, MNT_CPROTECT
);
927 /* At this point, if the mount point flag is set, we can enable it. */
928 if (vfs_flags(hfsmp
->hfs_mp
) & MNT_CPROTECT
) {
929 /* Cases 2+3 above */
931 /* Get the EAs as needed. */
933 uint16_t majorversion
;
934 uint16_t minorversion
;
936 uint8_t cryptogen
= 0;
937 struct cp_root_xattr
*xattr
= NULL
;
938 MALLOC (xattr
, struct cp_root_xattr
*, sizeof(struct cp_root_xattr
), M_TEMP
, M_WAITOK
);
943 bzero (xattr
, sizeof(struct cp_root_xattr
));
945 /* go get the EA to get the version information */
946 cperr
= cp_getrootxattr (hfsmp
, xattr
);
948 * If there was no EA there, then write one out.
949 * Assuming EA is not present on the root means
950 * this is an erase install or a very old FS
954 /* Have to run a valid CP version. */
955 if ((xattr
->major_version
< CP_PREV_MAJOR_VERS
) || (xattr
->major_version
> CP_NEW_MAJOR_VERS
)) {
959 else if (cperr
== ENOATTR
) {
960 printf("No root EA set, creating new EA with new version: %d\n", CP_NEW_MAJOR_VERS
);
961 bzero(xattr
, sizeof(struct cp_root_xattr
));
962 xattr
->major_version
= CP_NEW_MAJOR_VERS
;
963 xattr
->minor_version
= CP_MINOR_VERS
;
964 cperr
= cp_setrootxattr (hfsmp
, xattr
);
966 majorversion
= xattr
->major_version
;
967 minorversion
= xattr
->minor_version
;
968 flags
= xattr
->flags
;
969 if (xattr
->flags
& CP_ROOT_CRYPTOG1
) {
977 /* Recheck for good status */
979 /* If we got here, then the CP version is valid. Set it in the mount point */
980 hfsmp
->hfs_running_cp_major_vers
= majorversion
;
981 printf("Running with CP root xattr: %d.%d\n", majorversion
, minorversion
);
982 hfsmp
->cproot_flags
= flags
;
983 hfsmp
->cp_crypto_generation
= cryptogen
;
986 * Acquire the boot-arg for the AKS default key; if invalid, obtain from the device tree.
987 * Ensure that the boot-arg's value is valid for FILES (not directories),
988 * since only files are actually protected for now.
991 PE_parse_boot_argn("aks_default_class", &hfsmp
->default_cp_class
, sizeof(hfsmp
->default_cp_class
));
993 if (cp_is_valid_class(0, hfsmp
->default_cp_class
) == 0) {
994 PE_get_default("kern.default_cp_class", &hfsmp
->default_cp_class
, sizeof(hfsmp
->default_cp_class
));
997 if (cp_is_valid_class(0, hfsmp
->default_cp_class
) == 0) {
998 hfsmp
->default_cp_class
= PROTECTION_CLASS_C
;
1006 /* If CONFIG_PROTECT not built, ignore CP */
1007 vfs_clearflags(hfsmp
->hfs_mp
, MNT_CPROTECT
);
1012 * Establish a metadata allocation zone.
1014 hfs_metadatazone_init(hfsmp
, false);
1017 * Make any metadata zone adjustments.
1019 if (hfsmp
->hfs_flags
& HFS_METADATA_ZONE
) {
1020 /* Keep the roving allocator out of the metadata zone. */
1021 if (vcb
->nextAllocation
>= hfsmp
->hfs_metazone_start
&&
1022 vcb
->nextAllocation
<= hfsmp
->hfs_metazone_end
) {
1023 HFS_UPDATE_NEXT_ALLOCATION(hfsmp
, hfsmp
->hfs_metazone_end
+ 1);
1026 if (vcb
->nextAllocation
<= 1) {
1027 vcb
->nextAllocation
= hfsmp
->hfs_min_alloc_start
;
1030 vcb
->sparseAllocation
= hfsmp
->hfs_min_alloc_start
;
1032 /* Setup private/hidden directories for hardlinks. */
1033 hfs_privatedir_init(hfsmp
, FILE_HARDLINKS
);
1034 hfs_privatedir_init(hfsmp
, DIR_HARDLINKS
);
1036 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0)
1037 hfs_remove_orphans(hfsmp
);
1039 /* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
1040 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0)
1042 retval
= hfs_erase_unused_nodes(hfsmp
);
1044 if (HFS_MOUNT_DEBUG
) {
1045 printf("hfs_mounthfsplus: hfs_erase_unused_nodes returned (%d) for %s \n", retval
, hfsmp
->vcbVN
);
1053 * Allow hot file clustering if conditions allow.
1055 if ((hfsmp
->hfs_flags
& HFS_METADATA_ZONE
) &&
1056 ((hfsmp
->hfs_flags
& (HFS_READ_ONLY
| HFS_SSD
)) == 0)) {
1057 (void) hfs_recording_init(hfsmp
);
1060 /* Force ACLs on HFS+ file systems. */
1061 vfs_setextendedsecurity(HFSTOVFS(hfsmp
));
1063 /* Enable extent-based extended attributes by default */
1064 hfsmp
->hfs_flags
|= HFS_XATTR_EXTENTS
;
1070 * A fatal error occurred and the volume cannot be mounted, so
1071 * release any resources that we acquired...
1073 hfsUnmount(hfsmp
, NULL
);
1075 if (HFS_MOUNT_DEBUG
) {
1076 printf("hfs_mounthfsplus: encountered error (%d)\n", retval
);
/*
 * NOTE(review): extraction is mangled; numbers at line starts are the
 * original file's line numbers.  ReleaseMetaFileVNode closes the B-tree
 * attached to a metadata vnode (if any) under the cnode lock.  The
 * trailing vnode-release statements of this routine were elided by the
 * extraction (gap after original line 1098) — confirm against the full file.
 */
1083 * ReleaseMetaFileVNode
1087 static void ReleaseMetaFileVNode(struct vnode
*vp
)
1089 struct filefork
*fp
;
/* Only act when the vnode exists and has a filefork behind it. */
1091 if (vp
&& (fp
= VTOF(vp
))) {
/* A non-NULL fcbBTCBPtr means this fork fronts a B-tree that must be closed. */
1092 if (fp
->fcbBTCBPtr
!= NULL
) {
/* BTClosePath is done under an exclusive cnode lock. */
1093 (void)hfs_lock(VTOC(vp
), HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
1094 (void) BTClosePath(fp
);
1095 hfs_unlock(VTOC(vp
));
1098 /* release the node even if BTClosePath fails */
/*
 * NOTE(review): mangled extraction; embedded numbers are original line
 * numbers and some brace/return lines were elided.  hfsUnmount releases,
 * in order, the attribute-data, startup, attribute, catalog, extents and
 * allocation metadata vnodes via ReleaseMetaFileVNode, NULLing out the
 * corresponding hfsmount back-pointers as it goes.
 */
1105 /*************************************************************
1107 * Unmounts a hfs volume.
1108 * At this point vflush() has been called (to dump all non-metadata files)
1110 *************************************************************/
1113 hfsUnmount( register struct hfsmount
*hfsmp
, __unused
struct proc
*p
)
1115 /* Get rid of our attribute data vnode (if any). This is done
1116 * after the vflush() during mount, so we don't need to worry
1119 if (hfsmp
->hfs_attrdata_vp
) {
1120 ReleaseMetaFileVNode(hfsmp
->hfs_attrdata_vp
);
1121 hfsmp
->hfs_attrdata_vp
= NULLVP
;
1124 if (hfsmp
->hfs_startup_vp
) {
1125 ReleaseMetaFileVNode(hfsmp
->hfs_startup_vp
);
1126 hfsmp
->hfs_startup_cp
= NULL
;
1127 hfsmp
->hfs_startup_vp
= NULL
;
1130 if (hfsmp
->hfs_attribute_vp
) {
1131 ReleaseMetaFileVNode(hfsmp
->hfs_attribute_vp
);
1132 hfsmp
->hfs_attribute_cp
= NULL
;
1133 hfsmp
->hfs_attribute_vp
= NULL
;
1136 if (hfsmp
->hfs_catalog_vp
) {
1137 ReleaseMetaFileVNode(hfsmp
->hfs_catalog_vp
);
1138 hfsmp
->hfs_catalog_cp
= NULL
;
1139 hfsmp
->hfs_catalog_vp
= NULL
;
1142 if (hfsmp
->hfs_extents_vp
) {
1143 ReleaseMetaFileVNode(hfsmp
->hfs_extents_vp
);
1144 hfsmp
->hfs_extents_cp
= NULL
;
1145 hfsmp
->hfs_extents_vp
= NULL
;
1148 if (hfsmp
->hfs_allocation_vp
) {
1149 ReleaseMetaFileVNode(hfsmp
->hfs_allocation_vp
);
1150 hfsmp
->hfs_allocation_cp
= NULL
;
1151 hfsmp
->hfs_allocation_vp
= NULL
;
/*
 * NOTE(review): mangled extraction; the early `return` bodies after the
 * blockCount==0 checks (original lines ~1181, ~1192) were elided.
 * overflow_extents sums the in-cnode extent records (3 for plain HFS,
 * 8 for HFS+) and reports whether ff_blocks exceeds that sum, i.e.
 * whether additional extents live in the extents-overflow B-tree.
 */
1159 * Test if fork has overflow extents.
1162 * non-zero - overflow extents exist
1163 * zero - overflow extents do not exist
1166 bool overflow_extents(struct filefork
*fp
)
1171 // If the vnode pointer is NULL then we're being called
1172 // from hfs_remove_orphans() with a faked-up filefork
1173 // and therefore it has to be an HFS+ volume. Otherwise
1174 // we check through the volume header to see what type
1175 // of volume we're on.
1179 if (FTOV(fp
) && VTOVCB(FTOV(fp
))->vcbSigWord
== kHFSSigWord
) {
/* Plain HFS: only 3 extent records in the catalog record. */
1180 if (fp
->ff_extents
[2].blockCount
== 0)
1183 blocks
= fp
->ff_extents
[0].blockCount
+
1184 fp
->ff_extents
[1].blockCount
+
1185 fp
->ff_extents
[2].blockCount
;
1187 return fp
->ff_blocks
> blocks
;
/* HFS+: 8 extent records in the catalog record. */
1191 if (fp
->ff_extents
[7].blockCount
== 0)
1194 blocks
= fp
->ff_extents
[0].blockCount
+
1195 fp
->ff_extents
[1].blockCount
+
1196 fp
->ff_extents
[2].blockCount
+
1197 fp
->ff_extents
[3].blockCount
+
1198 fp
->ff_extents
[4].blockCount
+
1199 fp
->ff_extents
[5].blockCount
+
1200 fp
->ff_extents
[6].blockCount
+
1201 fp
->ff_extents
[7].blockCount
;
1203 return fp
->ff_blocks
> blocks
;
1206 static __attribute__((pure
))
1207 boolean_t
hfs_is_frozen(struct hfsmount
*hfsmp
)
1209 return (hfsmp
->hfs_freeze_state
== HFS_FROZEN
1210 || (hfsmp
->hfs_freeze_state
== HFS_FREEZING
1211 && current_thread() != hfsmp
->hfs_freezing_thread
));
/*
 * NOTE(review): mangled extraction; several brace/continue/return lines
 * were elided (gaps in the embedded original numbering).  hfs_lock_global
 * takes the volume-wide journal rw-lock shared or exclusive, first
 * waiting out (and, if the freezing process is exiting, undoing) any
 * volume freeze, and re-checks the frozen state after acquisition.
 */
1215 * Lock the HFS global journal lock
1218 hfs_lock_global (struct hfsmount
*hfsmp
, enum hfs_locktype locktype
)
1220 thread_t thread
= current_thread();
/* Recursive acquisition of the global lock is a fatal programming error. */
1222 if (hfsmp
->hfs_global_lockowner
== thread
) {
1223 panic ("hfs_lock_global: locking against myself!");
1227 * This check isn't really necessary but this stops us taking
1228 * the mount lock in most cases. The essential check is below.
1230 if (hfs_is_frozen(hfsmp
)) {
1232 * Unfortunately, there is no easy way of getting a notification
1233 * for when a process is exiting and it's possible for the exiting
1234 * process to get blocked somewhere else. To catch this, we
1235 * periodically monitor the frozen process here and thaw if
1236 * we spot that it's exiting.
1239 hfs_lock_mount(hfsmp
);
/* 500 ms poll interval while waiting for the thaw. */
1241 struct timespec ts
= { 0, 500 * NSEC_PER_MSEC
};
1243 while (hfs_is_frozen(hfsmp
)) {
1244 if (hfsmp
->hfs_freeze_state
== HFS_FROZEN
1245 && proc_exiting(hfsmp
->hfs_freezing_proc
)) {
1246 hfs_thaw_locked(hfsmp
);
/* Sleeps on hfs_freeze_state, dropping hfs_mutex while blocked. */
1250 msleep(&hfsmp
->hfs_freeze_state
, &hfsmp
->hfs_mutex
,
1251 PWAIT
, "hfs_lock_global (frozen)", &ts
);
1253 hfs_unlock_mount(hfsmp
);
1256 /* HFS_SHARED_LOCK */
1257 if (locktype
== HFS_SHARED_LOCK
) {
1258 lck_rw_lock_shared (&hfsmp
->hfs_global_lock
);
1259 hfsmp
->hfs_global_lockowner
= HFS_SHARED_OWNER
;
1261 /* HFS_EXCLUSIVE_LOCK */
1263 lck_rw_lock_exclusive (&hfsmp
->hfs_global_lock
);
1264 hfsmp
->hfs_global_lockowner
= thread
;
1268 * We have to check if we're frozen again because of the time
1269 * between when we checked and when we took the global lock.
1271 if (hfs_is_frozen(hfsmp
)) {
/* Lost the race with a freezer: back the lock out and (presumably) retry — tail elided. */
1272 hfs_unlock_global(hfsmp
);
/*
 * NOTE(review): mangled extraction; the `else` that separates the
 * exclusive and shared unlock paths (between original lines 1291 and
 * 1295) was elided — confirm against the full file.  Ownership of the
 * exclusive lock is detected by comparing hfs_global_lockowner with the
 * current thread.
 */
1281 * Unlock the HFS global journal lock
1284 hfs_unlock_global (struct hfsmount
*hfsmp
)
1286 thread_t thread
= current_thread();
1288 /* HFS_LOCK_EXCLUSIVE */
1289 if (hfsmp
->hfs_global_lockowner
== thread
) {
1290 hfsmp
->hfs_global_lockowner
= NULL
;
1291 lck_rw_unlock_exclusive (&hfsmp
->hfs_global_lock
);
1293 /* HFS_LOCK_SHARED */
1295 lck_rw_unlock_shared (&hfsmp
->hfs_global_lock
);
1300 * Lock the HFS mount lock
1302 * Note: this is a mutex, not a rw lock!
1305 void hfs_lock_mount (struct hfsmount
*hfsmp
) {
1306 lck_mtx_lock (&(hfsmp
->hfs_mutex
));
1310 * Unlock the HFS mount lock
1312 * Note: this is a mutex, not a rw lock!
1315 void hfs_unlock_mount (struct hfsmount
*hfsmp
) {
1316 lck_mtx_unlock (&(hfsmp
->hfs_mutex
));
/*
 * NOTE(review): mangled extraction; closing braces and the final
 * `return (flags)` style tail were elided.  hfs_systemfile_lock acquires
 * the requested system-file cnode locks in a fixed order (Catalog,
 * Attributes, Startup, Bitmap, Extents), upgrades the request with
 * SFL_EXTENTS whenever a locked b-tree file has overflow extents, and
 * clears from `flags` any bit whose file does not exist so unlock won't
 * touch it.  The returned/updated flags are what the caller later passes
 * to hfs_systemfile_unlock.
 */
1320 * Lock HFS system file(s).
1323 hfs_systemfile_lock(struct hfsmount
*hfsmp
, int flags
, enum hfs_locktype locktype
)
1326 * Locking order is Catalog file, Attributes file, Startup file, Bitmap file, Extents file
1328 if (flags
& SFL_CATALOG
) {
1329 #ifdef HFS_CHECK_LOCK_ORDER
1330 if (hfsmp
->hfs_attribute_cp
&& hfsmp
->hfs_attribute_cp
->c_lockowner
== current_thread()) {
1331 panic("hfs_systemfile_lock: bad lock order (Attributes before Catalog)");
1333 if (hfsmp
->hfs_startup_cp
&& hfsmp
->hfs_startup_cp
->c_lockowner
== current_thread()) {
1334 panic("hfs_systemfile_lock: bad lock order (Startup before Catalog)");
1336 if (hfsmp
-> hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== current_thread()) {
1337 panic("hfs_systemfile_lock: bad lock order (Extents before Catalog)");
1339 #endif /* HFS_CHECK_LOCK_ORDER */
1341 if (hfsmp
->hfs_catalog_cp
) {
1342 (void) hfs_lock(hfsmp
->hfs_catalog_cp
, locktype
, HFS_LOCK_DEFAULT
);
1344 * When the catalog file has overflow extents then
1345 * also acquire the extents b-tree lock if its not
1346 * already requested.
1348 if (((flags
& SFL_EXTENTS
) == 0) &&
1349 (hfsmp
->hfs_catalog_vp
!= NULL
) &&
1350 (overflow_extents(VTOF(hfsmp
->hfs_catalog_vp
)))) {
1351 flags
|= SFL_EXTENTS
;
/* No catalog cnode: strip the bit so unlock skips it. */
1354 flags
&= ~SFL_CATALOG
;
1358 if (flags
& SFL_ATTRIBUTE
) {
1359 #ifdef HFS_CHECK_LOCK_ORDER
1360 if (hfsmp
->hfs_startup_cp
&& hfsmp
->hfs_startup_cp
->c_lockowner
== current_thread()) {
1361 panic("hfs_systemfile_lock: bad lock order (Startup before Attributes)");
1363 if (hfsmp
->hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== current_thread()) {
1364 panic("hfs_systemfile_lock: bad lock order (Extents before Attributes)");
1366 #endif /* HFS_CHECK_LOCK_ORDER */
1368 if (hfsmp
->hfs_attribute_cp
) {
1369 (void) hfs_lock(hfsmp
->hfs_attribute_cp
, locktype
, HFS_LOCK_DEFAULT
);
1371 * When the attribute file has overflow extents then
1372 * also acquire the extents b-tree lock if its not
1373 * already requested.
1375 if (((flags
& SFL_EXTENTS
) == 0) &&
1376 (hfsmp
->hfs_attribute_vp
!= NULL
) &&
1377 (overflow_extents(VTOF(hfsmp
->hfs_attribute_vp
)))) {
1378 flags
|= SFL_EXTENTS
;
1381 flags
&= ~SFL_ATTRIBUTE
;
1385 if (flags
& SFL_STARTUP
) {
1386 #ifdef HFS_CHECK_LOCK_ORDER
1387 if (hfsmp
-> hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== current_thread()) {
1388 panic("hfs_systemfile_lock: bad lock order (Extents before Startup)");
1390 #endif /* HFS_CHECK_LOCK_ORDER */
1392 if (hfsmp
->hfs_startup_cp
) {
1393 (void) hfs_lock(hfsmp
->hfs_startup_cp
, locktype
, HFS_LOCK_DEFAULT
);
1395 * When the startup file has overflow extents then
1396 * also acquire the extents b-tree lock if its not
1397 * already requested.
1399 if (((flags
& SFL_EXTENTS
) == 0) &&
1400 (hfsmp
->hfs_startup_vp
!= NULL
) &&
1401 (overflow_extents(VTOF(hfsmp
->hfs_startup_vp
)))) {
1402 flags
|= SFL_EXTENTS
;
1405 flags
&= ~SFL_STARTUP
;
1410 * To prevent locks being taken in the wrong order, the extent lock
1411 * gets a bitmap lock as well.
1413 if (flags
& (SFL_BITMAP
| SFL_EXTENTS
)) {
1414 if (hfsmp
->hfs_allocation_cp
) {
/* Bitmap is always taken exclusive, regardless of the requested locktype. */
1415 (void) hfs_lock(hfsmp
->hfs_allocation_cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
1417 * The bitmap lock is also grabbed when only extent lock
1418 * was requested. Set the bitmap lock bit in the lock
1419 * flags which callers will use during unlock.
1421 flags
|= SFL_BITMAP
;
1423 flags
&= ~SFL_BITMAP
;
1427 if (flags
& SFL_EXTENTS
) {
1429 * Since the extents btree lock is recursive we always
1430 * need exclusive access.
1432 if (hfsmp
->hfs_extents_cp
) {
1433 (void) hfs_lock(hfsmp
->hfs_extents_cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
1435 if (hfsmp
->hfs_mp
->mnt_kern_flag
& MNTK_SWAP_MOUNT
) {
1437 * because we may need this lock on the pageout path (if a swapfile allocation
1438 * spills into the extents overflow tree), we will grant the holder of this
1439 * lock the privilege of dipping into the reserve free pool in order to prevent
1440 * a deadlock from occurring if we need those pageouts to complete before we
1441 * will make any new pages available on the free list... the deadlock can occur
1442 * if this thread needs to allocate memory while this lock is held
1444 if (set_vm_privilege(TRUE
) == FALSE
) {
1446 * indicate that we need to drop vm_privilege
1449 flags
|= SFL_VM_PRIV
;
1453 flags
&= ~SFL_EXTENTS
;
/*
 * NOTE(review): mangled extraction; the line capturing the current time
 * into `tv` (between original lines 1470 and 1472) and various braces
 * were elided.  hfs_systemfile_unlock releases the system-file cnode
 * locks named in `flags`; on non-journaled volumes it first flushes a
 * b-tree (hfs_btsync) when too many metadata buffers are locked or the
 * last sync is too old, and it drops the vm privilege granted by
 * hfs_systemfile_lock when SFL_VM_PRIV is set.
 */
1461 * unlock HFS system file(s).
1464 hfs_systemfile_unlock(struct hfsmount
*hfsmp
, int flags
)
1467 u_int32_t lastfsync
;
1468 int numOfLockedBuffs
;
/* Only non-journaled volumes need the time-based btsync heuristics below. */
1470 if (hfsmp
->jnl
== NULL
) {
1472 lastfsync
= tv
.tv_sec
;
1474 if (flags
& SFL_STARTUP
&& hfsmp
->hfs_startup_cp
) {
1475 hfs_unlock(hfsmp
->hfs_startup_cp
);
1477 if (flags
& SFL_ATTRIBUTE
&& hfsmp
->hfs_attribute_cp
) {
1478 if (hfsmp
->jnl
== NULL
) {
1479 BTGetLastSync((FCB
*)VTOF(hfsmp
->hfs_attribute_vp
), &lastfsync
);
1480 numOfLockedBuffs
= count_lock_queue();
1481 if ((numOfLockedBuffs
> kMaxLockedMetaBuffers
) ||
1482 ((numOfLockedBuffs
> 1) && ((tv
.tv_sec
- lastfsync
) >
1483 kMaxSecsForFsync
))) {
1484 hfs_btsync(hfsmp
->hfs_attribute_vp
, HFS_SYNCTRANS
);
1487 hfs_unlock(hfsmp
->hfs_attribute_cp
);
1489 if (flags
& SFL_CATALOG
&& hfsmp
->hfs_catalog_cp
) {
1490 if (hfsmp
->jnl
== NULL
) {
1491 BTGetLastSync((FCB
*)VTOF(hfsmp
->hfs_catalog_vp
), &lastfsync
);
1492 numOfLockedBuffs
= count_lock_queue();
1493 if ((numOfLockedBuffs
> kMaxLockedMetaBuffers
) ||
1494 ((numOfLockedBuffs
> 1) && ((tv
.tv_sec
- lastfsync
) >
1495 kMaxSecsForFsync
))) {
1496 hfs_btsync(hfsmp
->hfs_catalog_vp
, HFS_SYNCTRANS
);
1499 hfs_unlock(hfsmp
->hfs_catalog_cp
);
1501 if (flags
& SFL_BITMAP
&& hfsmp
->hfs_allocation_cp
) {
1502 hfs_unlock(hfsmp
->hfs_allocation_cp
);
1504 if (flags
& SFL_EXTENTS
&& hfsmp
->hfs_extents_cp
) {
1505 if (hfsmp
->jnl
== NULL
) {
1506 BTGetLastSync((FCB
*)VTOF(hfsmp
->hfs_extents_vp
), &lastfsync
);
1507 numOfLockedBuffs
= count_lock_queue();
1508 if ((numOfLockedBuffs
> kMaxLockedMetaBuffers
) ||
1509 ((numOfLockedBuffs
> 1) && ((tv
.tv_sec
- lastfsync
) >
1510 kMaxSecsForFsync
))) {
1511 hfs_btsync(hfsmp
->hfs_extents_vp
, HFS_SYNCTRANS
);
1514 hfs_unlock(hfsmp
->hfs_extents_cp
);
1516 if (flags
& SFL_VM_PRIV
) {
1518 * revoke the vm_privilege we granted this thread
1519 * now that we have unlocked the overflow extents
1521 set_vm_privilege(FALSE
);
/*
 * NOTE(review): mangled extraction; the early `return`, the `locked`
 * declaration and the case `break`s were elided.  RequireFileLock is a
 * debug-only assertion: for a system-file vnode that should be held
 * (exclusively, unless `shareable`), panic with a file-specific message
 * when the current thread does not own the cnode lock.
 */
1530 * Check to see if a vnode is locked in the current context
1531 * This is to be used for debugging purposes only!!
1534 void RequireFileLock(FileReference vp
, int shareable
)
1538 /* The extents btree and allocation bitmap are always exclusive. */
1539 if (VTOC(vp
)->c_fileid
== kHFSExtentsFileID
||
1540 VTOC(vp
)->c_fileid
== kHFSAllocationFileID
) {
1544 locked
= VTOC(vp
)->c_lockowner
== current_thread();
1546 if (!locked
&& !shareable
) {
1547 switch (VTOC(vp
)->c_fileid
) {
1548 case kHFSExtentsFileID
:
1549 panic("hfs: extents btree not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1551 case kHFSCatalogFileID
:
1552 panic("hfs: catalog btree not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1554 case kHFSAllocationFileID
:
1555 /* The allocation file can hide behind the jornal lock. */
1556 if (VTOHFS(vp
)->jnl
== NULL
)
1557 panic("hfs: allocation file not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1559 case kHFSStartupFileID
:
1560 panic("hfs: startup file not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1561 case kHFSAttributesFileID
:
1562 panic("hfs: attributes btree not locked! v: 0x%08X\n #\n", (u_int
)vp
);
/*
 * NOTE(review): mangled extraction; the success/failure return
 * statements following the big condition (original lines ~1592-1596)
 * were elided — presumably return 0 on a match and an error otherwise;
 * confirm against the full file.  The bracketed tags [1a]..[3] in the
 * condition refer to the ownership cases enumerated in the comment.
 */
1571 * There are three ways to qualify for ownership rights on an object:
1573 * 1. (a) Your UID matches the cnode's UID.
1574 * (b) The object in question is owned by "unknown"
1575 * 2. (a) Permissions on the filesystem are being ignored and
1576 * your UID matches the replacement UID.
1577 * (b) Permissions on the filesystem are being ignored and
1578 * the replacement UID is "unknown".
1583 hfs_owner_rights(struct hfsmount
*hfsmp
, uid_t cnode_uid
, kauth_cred_t cred
,
1584 __unused
struct proc
*p
, int invokesuperuserstatus
)
1586 if ((kauth_cred_getuid(cred
) == cnode_uid
) || /* [1a] */
1587 (cnode_uid
== UNKNOWNUID
) || /* [1b] */
1588 ((((unsigned int)vfs_flags(HFSTOVFS(hfsmp
))) & MNT_UNKNOWNPERMISSIONS
) && /* [2] */
1589 ((kauth_cred_getuid(cred
) == hfsmp
->hfs_uid
) || /* [2a] */
1590 (hfsmp
->hfs_uid
== UNKNOWNUID
))) || /* [2b] */
1591 (invokesuperuserstatus
&& (suser(cred
, 0) == 0))) { /* [3] */
1599 u_int32_t
BestBlockSizeFit(u_int32_t allocationBlockSize
,
1600 u_int32_t blockSizeLimit
,
1601 u_int32_t baseMultiple
) {
1603 Compute the optimal (largest) block size (no larger than allocationBlockSize) that is less than the
1604 specified limit but still an even multiple of the baseMultiple.
1606 int baseBlockCount
, blockCount
;
1607 u_int32_t trialBlockSize
;
1609 if (allocationBlockSize
% baseMultiple
!= 0) {
1611 Whoops: the allocation blocks aren't even multiples of the specified base:
1612 no amount of dividing them into even parts will be a multiple, either then!
1614 return 512; /* Hope for the best */
1617 /* Try the obvious winner first, to prevent 12K allocation blocks, for instance,
1618 from being handled as two 6K logical blocks instead of 3 4K logical blocks.
1619 Even though the former (the result of the loop below) is the larger allocation
1620 block size, the latter is more efficient: */
1621 if (allocationBlockSize
% PAGE_SIZE
== 0) return PAGE_SIZE
;
1623 /* No clear winner exists: pick the largest even fraction <= MAXBSIZE: */
1624 baseBlockCount
= allocationBlockSize
/ baseMultiple
; /* Now guaranteed to be an even multiple */
1626 for (blockCount
= baseBlockCount
; blockCount
> 0; --blockCount
) {
1627 trialBlockSize
= blockCount
* baseMultiple
;
1628 if (allocationBlockSize
% trialBlockSize
== 0) { /* An even multiple? */
1629 if ((trialBlockSize
<= blockSizeLimit
) &&
1630 (trialBlockSize
% baseMultiple
== 0)) {
1631 return trialBlockSize
;
1636 /* Note: we should never get here, since blockCount = 1 should always work,
1637 but this is nice and safe and makes the compiler happy, too ... */
/*
 * NOTE(review): mangled extraction; the local declarations of `error`
 * and `lockflags`, the early-return value for non-HFS+ volumes, and the
 * success test guarding the fileid return (between original lines 1663
 * and 1666) were elided.  GetFileInfo looks up `name` in the volume's
 * root directory under a shared catalog lock, fills in the caller's
 * cat_attr/cat_fork, and yields the file id (0 on error).
 */
1643 GetFileInfo(ExtendedVCB
*vcb
, __unused u_int32_t dirid
, const char *name
,
1644 struct cat_attr
*fattr
, struct cat_fork
*forkinfo
)
1646 struct hfsmount
* hfsmp
;
1647 struct cat_desc jdesc
;
/* Only HFS+ volumes are supported by this lookup helper. */
1651 if (vcb
->vcbSigWord
!= kHFSPlusSigWord
)
1654 hfsmp
= VCBTOHFS(vcb
);
1656 memset(&jdesc
, 0, sizeof(struct cat_desc
));
1657 jdesc
.cd_parentcnid
= kRootDirID
;
1658 jdesc
.cd_nameptr
= (const u_int8_t
*)name
;
1659 jdesc
.cd_namelen
= strlen(name
);
1661 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
1662 error
= cat_lookup(hfsmp
, &jdesc
, 0, 0, NULL
, fattr
, forkinfo
, NULL
);
1663 hfs_systemfile_unlock(hfsmp
, lockflags
);
1666 return (fattr
->ca_fileid
);
1667 } else if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
1671 return (0); /* XXX what callers expect on an error */
/*
 * NOTE(review): mangled extraction with many elided lines (declarations,
 * loop/exit labels, braces).  hfs_remove_orphans scans the HFS+ private
 * "deleted" directory for entries named "temp<cnid>" left behind by an
 * unclean unmount, truncates their forks (in chunks for very large
 * files, restarting the transaction between chunks), deletes their
 * catalog records, and fixes up parent/volume counts.  Runs once per
 * mount (HFS_CLEANED_ORPHANS).
 */
1676 * On HFS Plus Volumes, there can be orphaned files or directories
1677 * These are files or directories that were unlinked while busy.
1678 * If the volume was not cleanly unmounted then some of these may
1679 * have persisted and need to be removed.
1682 hfs_remove_orphans(struct hfsmount
* hfsmp
)
1684 struct BTreeIterator
* iterator
= NULL
;
1685 struct FSBufferDescriptor btdata
;
1686 struct HFSPlusCatalogFile filerec
;
1687 struct HFSPlusCatalogKey
* keyp
;
1688 struct proc
*p
= current_proc();
1694 cat_cookie_t cookie
;
1700 int orphaned_files
= 0;
1701 int orphaned_dirs
= 0;
1703 bzero(&cookie
, sizeof(cookie
));
/* Idempotence: skip the scan if it already ran for this mount. */
1705 if (hfsmp
->hfs_flags
& HFS_CLEANED_ORPHANS
)
1708 vcb
= HFSTOVCB(hfsmp
);
1709 fcb
= VTOF(hfsmp
->hfs_catalog_vp
);
1711 btdata
.bufferAddress
= &filerec
;
1712 btdata
.itemSize
= sizeof(filerec
);
1713 btdata
.itemCount
= 1;
1715 MALLOC(iterator
, struct BTreeIterator
*, sizeof(*iterator
), M_TEMP
, M_WAITOK
);
1716 bzero(iterator
, sizeof(*iterator
));
1718 /* Build a key to "temp" */
1719 keyp
= (HFSPlusCatalogKey
*)&iterator
->key
;
1720 keyp
->parentID
= hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
;
1721 keyp
->nodeName
.length
= 4; /* "temp" */
1722 keyp
->keyLength
= kHFSPlusCatalogKeyMinimumLength
+ keyp
->nodeName
.length
* 2;
1723 keyp
->nodeName
.unicode
[0] = 't';
1724 keyp
->nodeName
.unicode
[1] = 'e';
1725 keyp
->nodeName
.unicode
[2] = 'm';
1726 keyp
->nodeName
.unicode
[3] = 'p';
1729 * Position the iterator just before the first real temp file/dir.
1731 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
1732 (void) BTSearchRecord(fcb
, iterator
, NULL
, NULL
, iterator
);
1733 hfs_systemfile_unlock(hfsmp
, lockflags
);
1735 /* Visit all the temp files/dirs in the HFS+ private directory. */
1737 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
1738 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, iterator
, &btdata
, NULL
);
1739 hfs_systemfile_unlock(hfsmp
, lockflags
);
/* Stop once we've walked past the private directory's children. */
1742 if (keyp
->parentID
!= hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
)
1745 (void) utf8_encodestr(keyp
->nodeName
.unicode
, keyp
->nodeName
.length
* 2,
1746 (u_int8_t
*)filename
, &namelen
, sizeof(filename
), 0, 0);
1748 (void) snprintf(tempname
, sizeof(tempname
), "%s%d",
1749 HFS_DELETE_PREFIX
, filerec
.fileID
);
1752 * Delete all files (and directories) named "tempxxx",
1753 * where xxx is the file's cnid in decimal.
1756 if (bcmp(tempname
, filename
, namelen
) == 0) {
1757 struct filefork dfork
;
1758 struct filefork rfork
;
1762 bzero(&dfork
, sizeof(dfork
));
1763 bzero(&rfork
, sizeof(rfork
));
1764 bzero(&cnode
, sizeof(cnode
));
1766 /* Delete any attributes, ignore errors */
1767 (void) hfs_removeallattr(hfsmp
, filerec
.fileID
);
1769 if (hfs_start_transaction(hfsmp
) != 0) {
1770 printf("hfs_remove_orphans: failed to start transaction\n");
1776 * Reserve some space in the Catalog file.
1778 if (cat_preflight(hfsmp
, CAT_DELETE
, &cookie
, p
) != 0) {
1779 printf("hfs_remove_orphans: cat_preflight failed\n");
1784 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_ATTRIBUTE
| SFL_EXTENTS
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
1787 /* Build a fake cnode */
1788 cat_convertattr(hfsmp
, (CatalogRecord
*)&filerec
, &cnode
.c_attr
,
1789 &dfork
.ff_data
, &rfork
.ff_data
);
1790 cnode
.c_desc
.cd_parentcnid
= hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
;
1791 cnode
.c_desc
.cd_nameptr
= (const u_int8_t
*)filename
;
1792 cnode
.c_desc
.cd_namelen
= namelen
;
1793 cnode
.c_desc
.cd_cnid
= cnode
.c_attr
.ca_fileid
;
1794 cnode
.c_blocks
= dfork
.ff_blocks
+ rfork
.ff_blocks
;
1796 /* Position iterator at previous entry */
1797 if (BTIterateRecord(fcb
, kBTreePrevRecord
, iterator
,
1802 /* Truncate the file to zero (both forks) */
1803 if (dfork
.ff_blocks
> 0) {
1806 dfork
.ff_cp
= &cnode
;
1807 cnode
.c_datafork
= &dfork
;
1808 cnode
.c_rsrcfork
= NULL
;
1809 fsize
= (u_int64_t
)dfork
.ff_blocks
* (u_int64_t
)HFSTOVCB(hfsmp
)->blockSize
;
/* Truncate very large files in HFS_BIGFILE_SIZE steps. */
1811 if (fsize
> HFS_BIGFILE_SIZE
) {
1812 fsize
-= HFS_BIGFILE_SIZE
;
1817 if (TruncateFileC(vcb
, (FCB
*)&dfork
, fsize
, 1, 0,
1818 cnode
.c_attr
.ca_fileid
, false) != 0) {
1819 printf("hfs: error truncating data fork!\n");
1824 // if we're iteratively truncating this file down,
1825 // then end the transaction and start a new one so
1826 // that no one transaction gets too big.
1828 if (fsize
> 0 && started_tr
) {
1829 /* Drop system file locks before starting
1830 * another transaction to preserve lock order.
1832 hfs_systemfile_unlock(hfsmp
, lockflags
);
1834 hfs_end_transaction(hfsmp
);
1836 if (hfs_start_transaction(hfsmp
) != 0) {
1840 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_ATTRIBUTE
| SFL_EXTENTS
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
1846 if (rfork
.ff_blocks
> 0) {
1847 rfork
.ff_cp
= &cnode
;
1848 cnode
.c_datafork
= NULL
;
1849 cnode
.c_rsrcfork
= &rfork
;
1850 if (TruncateFileC(vcb
, (FCB
*)&rfork
, 0, 1, 1, cnode
.c_attr
.ca_fileid
, false) != 0) {
1851 printf("hfs: error truncating rsrc fork!\n");
1856 /* Remove the file or folder record from the Catalog */
1857 if (cat_delete(hfsmp
, &cnode
.c_desc
, &cnode
.c_attr
) != 0) {
1858 printf("hfs_remove_orphans: error deleting cat rec for id %d!\n", cnode
.c_desc
.cd_cnid
);
1859 hfs_systemfile_unlock(hfsmp
, lockflags
);
1861 hfs_volupdate(hfsmp
, VOL_UPDATE
, 0);
1865 mode
= cnode
.c_attr
.ca_mode
& S_IFMT
;
1867 if (mode
== S_IFDIR
) {
1874 /* Update parent and volume counts */
1875 hfsmp
->hfs_private_attr
[FILE_HARDLINKS
].ca_entries
--;
1876 if (mode
== S_IFDIR
) {
1877 DEC_FOLDERCOUNT(hfsmp
, hfsmp
->hfs_private_attr
[FILE_HARDLINKS
]);
1880 (void)cat_update(hfsmp
, &hfsmp
->hfs_private_desc
[FILE_HARDLINKS
],
1881 &hfsmp
->hfs_private_attr
[FILE_HARDLINKS
], NULL
, NULL
);
1883 /* Drop locks and end the transaction */
1884 hfs_systemfile_unlock(hfsmp
, lockflags
);
1885 cat_postflight(hfsmp
, &cookie
, p
);
1886 catlock
= catreserve
= 0;
1889 Now that Catalog is unlocked, update the volume info, making
1890 sure to differentiate between files and directories
1892 if (mode
== S_IFDIR
) {
1893 hfs_volupdate(hfsmp
, VOL_RMDIR
, 0);
1896 hfs_volupdate(hfsmp
, VOL_RMFILE
, 0);
1900 hfs_end_transaction(hfsmp
);
1906 if (orphaned_files
> 0 || orphaned_dirs
> 0)
1907 printf("hfs: Removed %d orphaned / unlinked files and %d directories \n", orphaned_files
, orphaned_dirs
);
/* Cleanup/exit path: release anything still held, then mark the scan done. */
1910 hfs_systemfile_unlock(hfsmp
, lockflags
);
1913 cat_postflight(hfsmp
, &cookie
, p
);
1916 hfs_end_transaction(hfsmp
);
1919 FREE(iterator
, M_TEMP
);
1920 hfsmp
->hfs_flags
|= HFS_CLEANED_ORPHANS
;
/*
 * NOTE(review): mangled extraction; braces and the function's opening
 * were elided.  GetLogicalBlockSize yields hfs_logBlockSize by default,
 * but for system vnodes uses the B-tree node size (via BTGetInformation)
 * or, for the allocation file, the volume's vcbVBMIOSize.
 */
1925 * This will return the correct logical block size for a given vnode.
1926 * For most files, it is the allocation block size, for meta data like
1927 * BTrees, this is kept as part of the BTree private nodeSize
1930 GetLogicalBlockSize(struct vnode
*vp
)
1932 u_int32_t logBlockSize
;
1934 DBG_ASSERT(vp
!= NULL
);
1936 /* start with default */
1937 logBlockSize
= VTOHFS(vp
)->hfs_logBlockSize
;
1939 if (vnode_issystem(vp
)) {
1940 if (VTOF(vp
)->fcbBTCBPtr
!= NULL
) {
1941 BTreeInfoRec bTreeInfo
;
1944 * We do not lock the BTrees, because if we are getting block..then the tree
1945 * should be locked in the first place.
1946 * We just want the nodeSize wich will NEVER change..so even if the world
1947 * is changing..the nodeSize should remain the same. Which argues why lock
1948 * it in the first place??
1951 (void) BTGetInformation (VTOF(vp
), kBTreeInfoVersion
, &bTreeInfo
);
1953 logBlockSize
= bTreeInfo
.nodeSize
;
1955 } else if (VTOC(vp
)->c_fileid
== kHFSAllocationFileID
) {
1956 logBlockSize
= VTOVCB(vp
)->vcbVBMIOSize
;
1960 DBG_ASSERT(logBlockSize
> 0);
1962 return logBlockSize
;
/*
 * NOTE(review): mangled extraction; several lines were elided, including
 * the NULL check on backing_vp, the re-take of the mount lock before
 * reading the loaned/band/max values, the microtime() call feeding
 * `now`, and the `true`/`false` returns.  This helper computes, for a
 * sparse disk image, the number of this-volume-sized free blocks
 * available on the backing store, throttling statfs refreshes to one
 * per second.
 */
1966 static bool hfs_get_backing_free_blks(hfsmount_t
*hfsmp
, uint64_t *pfree_blks
)
1968 struct vfsstatfs
*vfsp
; /* 272 bytes */
1972 hfs_lock_mount(hfsmp
);
1974 vnode_t backing_vp
= hfsmp
->hfs_backingfs_rootvp
;
1976 hfs_unlock_mount(hfsmp
);
1980 // usecount is not enough; we need iocount
1981 if (vnode_get(backing_vp
)) {
1982 hfs_unlock_mount(hfsmp
);
1987 uint32_t loanedblks
= hfsmp
->loanedBlocks
;
1988 uint32_t bandblks
= hfsmp
->hfs_sparsebandblks
;
1989 uint64_t maxblks
= hfsmp
->hfs_backingfs_maxblocks
;
1991 hfs_unlock_mount(hfsmp
);
1993 mount_t backingfs_mp
= vnode_mount(backing_vp
);
/* Refresh backing-store statfs at most once per second. */
1996 if ((now
.tv_sec
- hfsmp
->hfs_last_backingstatfs
) >= 1) {
1997 vfs_update_vfsstat(backingfs_mp
, vfs_context_kernel(), VFS_KERNEL_EVENT
);
1998 hfsmp
->hfs_last_backingstatfs
= now
.tv_sec
;
2001 if (!(vfsp
= vfs_statfs(backingfs_mp
))) {
2002 vnode_put(backing_vp
);
2006 vfreeblks
= vfsp
->f_bavail
;
2007 /* Normalize block count if needed. */
2008 if (vfsp
->f_bsize
!= hfsmp
->blockSize
)
2009 vfreeblks
= vfreeblks
* vfsp
->f_bsize
/ hfsmp
->blockSize
;
2010 if (vfreeblks
> bandblks
)
2011 vfreeblks
-= bandblks
;
2016 * Take into account any delayed allocations. It is not
2017 * certain what the original reason for the "2 *" is. Most
2018 * likely it is to allow for additional requirements in the
2019 * host file system and metadata required by disk images. The
2020 * number of loaned blocks is likely to be small and we will
2021 * stop using them as we get close to the limit.
2023 loanedblks
= 2 * loanedblks
;
2024 if (vfreeblks
> loanedblks
)
2025 vfreeblks
-= loanedblks
;
2030 vfreeblks
= MIN(vfreeblks
, maxblks
);
2032 vnode_put(backing_vp
);
2034 *pfree_blks
= vfreeblks
;
/*
 * NOTE(review): mangled extraction; local declarations, some clamping
 * branches and the final return were elided.  hfs_freeblks reports the
 * volume's free block count, net of the reserve (when requested by the
 * caller, per wantreserve) and loaned blocks, further clamped by the
 * sparse backing store's availability and, on CoreStorage-backed (HFS_CS)
 * devices, by _DKIOCCSGETFREEBYTES.
 */
2041 hfs_freeblks(struct hfsmount
* hfsmp
, int wantreserve
)
2048 * We don't bother taking the mount lock
2049 * to look at these values since the values
2050 * themselves are each updated atomically
2051 * on aligned addresses.
2053 freeblks
= hfsmp
->freeBlocks
;
2054 rsrvblks
= hfsmp
->reserveBlocks
;
2055 loanblks
= hfsmp
->loanedBlocks
;
2057 if (freeblks
> rsrvblks
)
2058 freeblks
-= rsrvblks
;
2062 if (freeblks
> loanblks
)
2063 freeblks
-= loanblks
;
2069 * When the underlying device is sparse, check the
2070 * available space on the backing store volume.
2073 if (hfs_get_backing_free_blks(hfsmp
, &vfreeblks
))
2074 freeblks
= MIN(freeblks
, vfreeblks
);
2075 #endif /* HFS_SPARSE_DEV */
2077 if (hfsmp
->hfs_flags
& HFS_CS
) {
2078 uint64_t cs_free_bytes
;
2079 uint64_t cs_free_blks
;
2080 if (VNOP_IOCTL(hfsmp
->hfs_devvp
, _DKIOCCSGETFREEBYTES
,
2081 (caddr_t
)&cs_free_bytes
, 0, vfs_context_kernel()) == 0) {
2082 cs_free_blks
= cs_free_bytes
/ hfsmp
->blockSize
;
2083 if (cs_free_blks
> loanblks
)
2084 cs_free_blks
-= loanblks
;
2087 freeblks
= MIN(cs_free_blks
, freeblks
);
2095 * Map HFS Common errors (negative) to BSD error codes (positive).
2096 * Positive errors (ie BSD errors) are passed through unchanged.
2098 short MacToVFSError(OSErr err
)
2103 /* BSD/VFS internal errnos */
2105 case ERESERVEDNAME
: /* -8 */
2110 case dskFulErr
: /* -34 */
2111 case btNoSpaceAvail
: /* -32733 */
2113 case fxOvFlErr
: /* -32750 */
2116 case btBadNode
: /* -32731 */
2119 case memFullErr
: /* -108 */
2120 return ENOMEM
; /* +12 */
2122 case cmExists
: /* -32718 */
2123 case btExists
: /* -32734 */
2124 return EEXIST
; /* +17 */
2126 case cmNotFound
: /* -32719 */
2127 case btNotFound
: /* -32735 */
2128 return ENOENT
; /* 28 */
2130 case cmNotEmpty
: /* -32717 */
2131 return ENOTEMPTY
; /* 66 */
2133 case cmFThdDirErr
: /* -32714 */
2134 return EISDIR
; /* 21 */
2136 case fxRangeErr
: /* -32751 */
2139 case bdNamErr
: /* -37 */
2140 return ENAMETOOLONG
; /* 63 */
2142 case paramErr
: /* -50 */
2143 case fileBoundsErr
: /* -1309 */
2144 return EINVAL
; /* +22 */
2146 case fsBTBadNodeSize
:
2150 return EIO
; /* +5 */
2156 * Find the current thread's directory hint for a given index.
2158 * Requires an exclusive lock on directory cnode.
2160 * Use detach if the cnode lock must be dropped while the hint is still active.
2164 hfs_getdirhint(struct cnode
*dcp
, int index
, int detach
)
2167 directoryhint_t
*hint
;
2168 boolean_t need_remove
, need_init
;
2169 const u_int8_t
* name
;
2174 * Look for an existing hint first. If not found, create a new one (when
2175 * the list is not full) or recycle the oldest hint. Since new hints are
2176 * always added to the head of the list, the last hint is always the
2179 TAILQ_FOREACH(hint
, &dcp
->c_hintlist
, dh_link
) {
2180 if (hint
->dh_index
== index
)
2183 if (hint
!= NULL
) { /* found an existing hint */
2186 } else { /* cannot find an existing hint */
2188 if (dcp
->c_dirhintcnt
< HFS_MAXDIRHINTS
) { /* we don't need recycling */
2189 /* Create a default directory hint */
2190 MALLOC_ZONE(hint
, directoryhint_t
*, sizeof(directoryhint_t
), M_HFSDIRHINT
, M_WAITOK
);
2191 ++dcp
->c_dirhintcnt
;
2192 need_remove
= false;
2193 } else { /* recycle the last (i.e., the oldest) hint */
2194 hint
= TAILQ_LAST(&dcp
->c_hintlist
, hfs_hinthead
);
2195 if ((hint
->dh_desc
.cd_flags
& CD_HASBUF
) &&
2196 (name
= hint
->dh_desc
.cd_nameptr
)) {
2197 hint
->dh_desc
.cd_nameptr
= NULL
;
2198 hint
->dh_desc
.cd_namelen
= 0;
2199 hint
->dh_desc
.cd_flags
&= ~CD_HASBUF
;
2200 vfs_removename((const char *)name
);
2207 TAILQ_REMOVE(&dcp
->c_hintlist
, hint
, dh_link
);
2210 --dcp
->c_dirhintcnt
;
2212 TAILQ_INSERT_HEAD(&dcp
->c_hintlist
, hint
, dh_link
);
2215 hint
->dh_index
= index
;
2216 hint
->dh_desc
.cd_flags
= 0;
2217 hint
->dh_desc
.cd_encoding
= 0;
2218 hint
->dh_desc
.cd_namelen
= 0;
2219 hint
->dh_desc
.cd_nameptr
= NULL
;
2220 hint
->dh_desc
.cd_parentcnid
= dcp
->c_fileid
;
2221 hint
->dh_desc
.cd_hint
= dcp
->c_childhint
;
2222 hint
->dh_desc
.cd_cnid
= 0;
2224 hint
->dh_time
= tv
.tv_sec
;
2229 * Release a single directory hint.
2231 * Requires an exclusive lock on directory cnode.
2235 hfs_reldirhint(struct cnode
*dcp
, directoryhint_t
* relhint
)
2237 const u_int8_t
* name
;
2238 directoryhint_t
*hint
;
2240 /* Check if item is on list (could be detached) */
2241 TAILQ_FOREACH(hint
, &dcp
->c_hintlist
, dh_link
) {
2242 if (hint
== relhint
) {
2243 TAILQ_REMOVE(&dcp
->c_hintlist
, relhint
, dh_link
);
2244 --dcp
->c_dirhintcnt
;
2248 name
= relhint
->dh_desc
.cd_nameptr
;
2249 if ((relhint
->dh_desc
.cd_flags
& CD_HASBUF
) && (name
!= NULL
)) {
2250 relhint
->dh_desc
.cd_nameptr
= NULL
;
2251 relhint
->dh_desc
.cd_namelen
= 0;
2252 relhint
->dh_desc
.cd_flags
&= ~CD_HASBUF
;
2253 vfs_removename((const char *)name
);
2255 FREE_ZONE(relhint
, sizeof(directoryhint_t
), M_HFSDIRHINT
);
2259 * Release directory hints for given directory
2261 * Requires an exclusive lock on directory cnode.
2265 hfs_reldirhints(struct cnode
*dcp
, int stale_hints_only
)
2268 directoryhint_t
*hint
, *prev
;
2269 const u_int8_t
* name
;
2271 if (stale_hints_only
)
2274 /* searching from the oldest to the newest, so we can stop early when releasing stale hints only */
2275 for (hint
= TAILQ_LAST(&dcp
->c_hintlist
, hfs_hinthead
); hint
!= NULL
; hint
= prev
) {
2276 if (stale_hints_only
&& (tv
.tv_sec
- hint
->dh_time
) < HFS_DIRHINT_TTL
)
2277 break; /* stop here if this entry is too new */
2278 name
= hint
->dh_desc
.cd_nameptr
;
2279 if ((hint
->dh_desc
.cd_flags
& CD_HASBUF
) && (name
!= NULL
)) {
2280 hint
->dh_desc
.cd_nameptr
= NULL
;
2281 hint
->dh_desc
.cd_namelen
= 0;
2282 hint
->dh_desc
.cd_flags
&= ~CD_HASBUF
;
2283 vfs_removename((const char *)name
);
2285 prev
= TAILQ_PREV(hint
, hfs_hinthead
, dh_link
); /* must save this pointer before calling FREE_ZONE on this node */
2286 TAILQ_REMOVE(&dcp
->c_hintlist
, hint
, dh_link
);
2287 FREE_ZONE(hint
, sizeof(directoryhint_t
), M_HFSDIRHINT
);
2288 --dcp
->c_dirhintcnt
;
2293 * Insert a detached directory hint back into the list of dirhints.
2295 * Requires an exclusive lock on directory cnode.
2299 hfs_insertdirhint(struct cnode
*dcp
, directoryhint_t
* hint
)
2301 directoryhint_t
*test
;
2303 TAILQ_FOREACH(test
, &dcp
->c_hintlist
, dh_link
) {
2305 panic("hfs_insertdirhint: hint %p already on list!", hint
);
2308 TAILQ_INSERT_HEAD(&dcp
->c_hintlist
, hint
, dh_link
);
2309 ++dcp
->c_dirhintcnt
;
2313 * Perform a case-insensitive compare of two UTF-8 filenames.
2315 * Returns 0 if the strings match.
2319 hfs_namecmp(const u_int8_t
*str1
, size_t len1
, const u_int8_t
*str2
, size_t len2
)
2321 u_int16_t
*ustr1
, *ustr2
;
2322 size_t ulen1
, ulen2
;
2329 maxbytes
= kHFSPlusMaxFileNameChars
<< 1;
2330 MALLOC(ustr1
, u_int16_t
*, maxbytes
<< 1, M_TEMP
, M_WAITOK
);
2331 ustr2
= ustr1
+ (maxbytes
>> 1);
2333 if (utf8_decodestr(str1
, len1
, ustr1
, &ulen1
, maxbytes
, ':', 0) != 0)
2335 if (utf8_decodestr(str2
, len2
, ustr2
, &ulen2
, maxbytes
, ':', 0) != 0)
2338 cmp
= FastUnicodeCompare(ustr1
, ulen1
>>1, ustr2
, ulen2
>>1);
2340 FREE(ustr1
, M_TEMP
);
2345 typedef struct jopen_cb_info
{
2355 journal_open_cb(const char *bsd_dev_name
, const char *uuid_str
, void *arg
)
2357 struct nameidata nd
;
2358 jopen_cb_info
*ji
= (jopen_cb_info
*)arg
;
2362 strlcpy(&bsd_name
[0], "/dev/", sizeof(bsd_name
));
2363 strlcpy(&bsd_name
[5], bsd_dev_name
, sizeof(bsd_name
)-5);
2365 if (ji
->desired_uuid
&& ji
->desired_uuid
[0] && strcmp(uuid_str
, ji
->desired_uuid
) != 0) {
2366 return 1; // keep iterating
2369 // if we're here, either the desired uuid matched or there was no
2370 // desired uuid so let's try to open the device for writing and
2371 // see if it works. if it does, we'll use it.
2373 NDINIT(&nd
, LOOKUP
, OP_LOOKUP
, LOCKLEAF
, UIO_SYSSPACE32
, CAST_USER_ADDR_T(bsd_name
), vfs_context_kernel());
2374 if ((error
= namei(&nd
))) {
2375 printf("hfs: journal open cb: error %d looking up device %s (dev uuid %s)\n", error
, bsd_name
, uuid_str
);
2376 return 1; // keep iterating
2382 if (ji
->jvp
== NULL
) {
2383 printf("hfs: journal open cb: did not find %s (error %d)\n", bsd_name
, error
);
2385 error
= VNOP_OPEN(ji
->jvp
, FREAD
|FWRITE
, vfs_context_kernel());
2387 // if the journal is dirty and we didn't specify a desired
2388 // journal device uuid, then do not use the journal. but
2389 // if the journal is just invalid (e.g. it hasn't been
2390 // initialized) then just set the need_init flag.
2391 if (ji
->need_clean
&& ji
->desired_uuid
&& ji
->desired_uuid
[0] == '\0') {
2392 error
= journal_is_clean(ji
->jvp
, 0, ji
->jsize
, (void *)1, ji
->blksize
);
2393 if (error
== EBUSY
) {
2394 VNOP_CLOSE(ji
->jvp
, FREAD
|FWRITE
, vfs_context_kernel());
2397 return 1; // keep iterating
2398 } else if (error
== EINVAL
) {
2403 if (ji
->desired_uuid
&& ji
->desired_uuid
[0] == '\0') {
2404 strlcpy(ji
->desired_uuid
, uuid_str
, 128);
2406 vnode_setmountedon(ji
->jvp
);
2407 return 0; // stop iterating
2414 return 1; // keep iterating
2417 extern void IOBSDIterateMediaWithContent(const char *uuid_cstring
, int (*func
)(const char *bsd_dev_name
, const char *uuid_str
, void *arg
), void *arg
);
2418 kern_return_t
IOBSDGetPlatformSerialNumber(char *serial_number_str
, u_int32_t len
);
2422 open_journal_dev(const char *vol_device
,
2425 char *machine_serial_num
,
2430 int retry_counter
=0;
2434 ji
.desired_uuid
= uuid_str
;
2436 ji
.blksize
= blksize
;
2437 ji
.need_clean
= need_clean
;
2440 // if (uuid_str[0] == '\0') {
2441 // printf("hfs: open journal dev: %s: locating any available non-dirty external journal partition\n", vol_device);
2443 // printf("hfs: open journal dev: %s: trying to find the external journal partition w/uuid %s\n", vol_device, uuid_str);
2445 while (ji
.jvp
== NULL
&& retry_counter
++ < 4) {
2446 if (retry_counter
> 1) {
2448 printf("hfs: open_journal_dev: uuid %s not found. waiting 10sec.\n", uuid_str
);
2450 printf("hfs: open_journal_dev: no available external journal partition found. waiting 10sec.\n");
2452 delay_for_interval(10* 1000000, NSEC_PER_USEC
); // wait for ten seconds and then try again
2455 IOBSDIterateMediaWithContent(EXTJNL_CONTENT_TYPE_UUID
, journal_open_cb
, &ji
);
2458 if (ji
.jvp
== NULL
) {
2459 printf("hfs: volume: %s: did not find jnl device uuid: %s from machine serial number: %s\n",
2460 vol_device
, uuid_str
, machine_serial_num
);
2463 *need_init
= ji
.need_init
;
2470 hfs_early_journal_init(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
,
2471 void *_args
, off_t embeddedOffset
, daddr64_t mdb_offset
,
2472 HFSMasterDirectoryBlock
*mdbp
, kauth_cred_t cred
)
2474 JournalInfoBlock
*jibp
;
2475 struct buf
*jinfo_bp
, *bp
;
2476 int sectors_per_fsblock
, arg_flags
=0, arg_tbufsz
=0;
2477 int retval
, write_jibp
= 0;
2478 uint32_t blksize
= hfsmp
->hfs_logical_block_size
;
2479 struct vnode
*devvp
;
2480 struct hfs_mount_args
*args
= _args
;
2481 u_int32_t jib_flags
;
2482 u_int64_t jib_offset
;
2484 const char *dev_name
;
2486 devvp
= hfsmp
->hfs_devvp
;
2487 dev_name
= vnode_getname_printable(devvp
);
2489 if (args
!= NULL
&& (args
->flags
& HFSFSMNT_EXTENDED_ARGS
)) {
2490 arg_flags
= args
->journal_flags
;
2491 arg_tbufsz
= args
->journal_tbuffer_size
;
2494 sectors_per_fsblock
= SWAP_BE32(vhp
->blockSize
) / blksize
;
2497 retval
= (int)buf_meta_bread(devvp
,
2498 (daddr64_t
)((embeddedOffset
/blksize
) +
2499 ((u_int64_t
)SWAP_BE32(vhp
->journalInfoBlock
)*sectors_per_fsblock
)),
2500 hfsmp
->hfs_physical_block_size
, cred
, &jinfo_bp
);
2503 buf_brelse(jinfo_bp
);
2505 goto cleanup_dev_name
;
2508 jibp
= (JournalInfoBlock
*)buf_dataptr(jinfo_bp
);
2509 jib_flags
= SWAP_BE32(jibp
->flags
);
2510 jib_size
= SWAP_BE64(jibp
->size
);
2512 if (jib_flags
& kJIJournalInFSMask
) {
2513 hfsmp
->jvp
= hfsmp
->hfs_devvp
;
2514 jib_offset
= SWAP_BE64(jibp
->offset
);
2518 // if the volume was unmounted cleanly then we'll pick any
2519 // available external journal partition
2521 if (SWAP_BE32(vhp
->attributes
) & kHFSVolumeUnmountedMask
) {
2522 *((char *)&jibp
->ext_jnl_uuid
[0]) = '\0';
2525 hfsmp
->jvp
= open_journal_dev(dev_name
,
2526 !(jib_flags
& kJIJournalNeedInitMask
),
2527 (char *)&jibp
->ext_jnl_uuid
[0],
2528 (char *)&jibp
->machine_serial_num
[0],
2530 hfsmp
->hfs_logical_block_size
,
2532 if (hfsmp
->jvp
== NULL
) {
2533 buf_brelse(jinfo_bp
);
2535 goto cleanup_dev_name
;
2537 if (IOBSDGetPlatformSerialNumber(&jibp
->machine_serial_num
[0], sizeof(jibp
->machine_serial_num
)) != KERN_SUCCESS
) {
2538 strlcpy(&jibp
->machine_serial_num
[0], "unknown-machine-uuid", sizeof(jibp
->machine_serial_num
));
2545 jib_flags
|= kJIJournalNeedInitMask
;
2549 // save this off for the hack-y check in hfs_remove()
2550 hfsmp
->jnl_start
= jib_offset
/ SWAP_BE32(vhp
->blockSize
);
2551 hfsmp
->jnl_size
= jib_size
;
2553 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) && (vfs_flags(hfsmp
->hfs_mp
) & MNT_ROOTFS
) == 0) {
2554 // if the file system is read-only, check if the journal is empty.
2555 // if it is, then we can allow the mount. otherwise we have to
2557 retval
= journal_is_clean(hfsmp
->jvp
,
2558 jib_offset
+ embeddedOffset
,
2561 hfsmp
->hfs_logical_block_size
);
2565 buf_brelse(jinfo_bp
);
2568 const char *name
= vnode_getname_printable(devvp
);
2569 printf("hfs: early journal init: volume on %s is read-only and journal is dirty. Can not mount volume.\n",
2571 vnode_putname_printable(name
);
2574 goto cleanup_dev_name
;
2577 if (jib_flags
& kJIJournalNeedInitMask
) {
2578 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2579 jib_offset
+ embeddedOffset
, jib_size
);
2580 hfsmp
->jnl
= journal_create(hfsmp
->jvp
,
2581 jib_offset
+ embeddedOffset
,
2587 hfs_sync_metadata
, hfsmp
->hfs_mp
,
2590 journal_trim_set_callback(hfsmp
->jnl
, hfs_trim_callback
, hfsmp
);
2592 // no need to start a transaction here... if this were to fail
2593 // we'd just re-init it on the next mount.
2594 jib_flags
&= ~kJIJournalNeedInitMask
;
2595 jibp
->flags
= SWAP_BE32(jib_flags
);
2596 buf_bwrite(jinfo_bp
);
2600 //printf("hfs: Opening the journal (joffset 0x%llx sz 0x%llx vhp_blksize %d)...\n",
2601 // jib_offset + embeddedOffset,
2602 // jib_size, SWAP_BE32(vhp->blockSize));
2604 hfsmp
->jnl
= journal_open(hfsmp
->jvp
,
2605 jib_offset
+ embeddedOffset
,
2611 hfs_sync_metadata
, hfsmp
->hfs_mp
,
2614 journal_trim_set_callback(hfsmp
->jnl
, hfs_trim_callback
, hfsmp
);
2617 buf_bwrite(jinfo_bp
);
2619 buf_brelse(jinfo_bp
);
2624 if (hfsmp
->jnl
&& mdbp
) {
2625 // reload the mdb because it could have changed
2626 // if the journal had to be replayed.
2627 if (mdb_offset
== 0) {
2628 mdb_offset
= (daddr64_t
)((embeddedOffset
/ blksize
) + HFS_PRI_SECTOR(blksize
));
2631 retval
= (int)buf_meta_bread(devvp
,
2632 HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
),
2633 hfsmp
->hfs_physical_block_size
, cred
, &bp
);
2638 printf("hfs: failed to reload the mdb after opening the journal (retval %d)!\n",
2640 goto cleanup_dev_name
;
2642 bcopy((char *)buf_dataptr(bp
) + HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
), mdbp
, 512);
2648 // if we expected the journal to be there and we couldn't
2649 // create it or open it then we have to bail out.
2650 if (hfsmp
->jnl
== NULL
) {
2651 printf("hfs: early jnl init: failed to open/create the journal (retval %d).\n", retval
);
2653 goto cleanup_dev_name
;
2659 vnode_putname_printable(dev_name
);
2665 // This function will go and re-locate the .journal_info_block and
2666 // the .journal files in case they moved (which can happen if you
2667 // run Norton SpeedDisk). If we fail to find either file we just
2668 // disable journaling for this volume and return. We turn off the
2669 // journaling bit in the vcb and assume it will get written to disk
2670 // later (if it doesn't on the next mount we'd do the same thing
2671 // again which is harmless). If we disable journaling we don't
2672 // return an error so that the volume is still mountable.
2674 // If the info we find for the .journal_info_block and .journal files
2675 // isn't what we had stored, we re-set our cached info and proceed
2676 // with opening the journal normally.
2679 hfs_late_journal_init(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
, void *_args
)
2681 JournalInfoBlock
*jibp
;
2682 struct buf
*jinfo_bp
;
2683 int sectors_per_fsblock
, arg_flags
=0, arg_tbufsz
=0;
2684 int retval
, write_jibp
= 0, recreate_journal
= 0;
2685 struct vnode
*devvp
;
2686 struct cat_attr jib_attr
, jattr
;
2687 struct cat_fork jib_fork
, jfork
;
2690 struct hfs_mount_args
*args
= _args
;
2691 u_int32_t jib_flags
;
2692 u_int64_t jib_offset
;
2695 devvp
= hfsmp
->hfs_devvp
;
2696 vcb
= HFSTOVCB(hfsmp
);
2698 if (args
!= NULL
&& (args
->flags
& HFSFSMNT_EXTENDED_ARGS
)) {
2699 if (args
->journal_disable
) {
2703 arg_flags
= args
->journal_flags
;
2704 arg_tbufsz
= args
->journal_tbuffer_size
;
2707 fid
= GetFileInfo(vcb
, kRootDirID
, ".journal_info_block", &jib_attr
, &jib_fork
);
2708 if (fid
== 0 || jib_fork
.cf_extents
[0].startBlock
== 0 || jib_fork
.cf_size
== 0) {
2709 printf("hfs: can't find the .journal_info_block! disabling journaling (start: %d).\n",
2710 jib_fork
.cf_extents
[0].startBlock
);
2711 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2714 hfsmp
->hfs_jnlinfoblkid
= fid
;
2716 // make sure the journal_info_block begins where we think it should.
2717 if (SWAP_BE32(vhp
->journalInfoBlock
) != jib_fork
.cf_extents
[0].startBlock
) {
2718 printf("hfs: The journal_info_block moved (was: %d; is: %d). Fixing up\n",
2719 SWAP_BE32(vhp
->journalInfoBlock
), jib_fork
.cf_extents
[0].startBlock
);
2721 vcb
->vcbJinfoBlock
= jib_fork
.cf_extents
[0].startBlock
;
2722 vhp
->journalInfoBlock
= SWAP_BE32(jib_fork
.cf_extents
[0].startBlock
);
2723 recreate_journal
= 1;
2727 sectors_per_fsblock
= SWAP_BE32(vhp
->blockSize
) / hfsmp
->hfs_logical_block_size
;
2729 retval
= (int)buf_meta_bread(devvp
,
2730 (vcb
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
+
2731 ((u_int64_t
)SWAP_BE32(vhp
->journalInfoBlock
)*sectors_per_fsblock
)),
2732 hfsmp
->hfs_physical_block_size
, NOCRED
, &jinfo_bp
);
2735 buf_brelse(jinfo_bp
);
2737 printf("hfs: can't read journal info block. disabling journaling.\n");
2738 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2742 jibp
= (JournalInfoBlock
*)buf_dataptr(jinfo_bp
);
2743 jib_flags
= SWAP_BE32(jibp
->flags
);
2744 jib_offset
= SWAP_BE64(jibp
->offset
);
2745 jib_size
= SWAP_BE64(jibp
->size
);
2747 fid
= GetFileInfo(vcb
, kRootDirID
, ".journal", &jattr
, &jfork
);
2748 if (fid
== 0 || jfork
.cf_extents
[0].startBlock
== 0 || jfork
.cf_size
== 0) {
2749 printf("hfs: can't find the journal file! disabling journaling (start: %d)\n",
2750 jfork
.cf_extents
[0].startBlock
);
2751 buf_brelse(jinfo_bp
);
2752 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2755 hfsmp
->hfs_jnlfileid
= fid
;
2757 // make sure the journal file begins where we think it should.
2758 if ((jib_flags
& kJIJournalInFSMask
) && (jib_offset
/ (u_int64_t
)vcb
->blockSize
) != jfork
.cf_extents
[0].startBlock
) {
2759 printf("hfs: The journal file moved (was: %lld; is: %d). Fixing up\n",
2760 (jib_offset
/ (u_int64_t
)vcb
->blockSize
), jfork
.cf_extents
[0].startBlock
);
2762 jib_offset
= (u_int64_t
)jfork
.cf_extents
[0].startBlock
* (u_int64_t
)vcb
->blockSize
;
2764 recreate_journal
= 1;
2767 // check the size of the journal file.
2768 if (jib_size
!= (u_int64_t
)jfork
.cf_extents
[0].blockCount
*vcb
->blockSize
) {
2769 printf("hfs: The journal file changed size! (was %lld; is %lld). Fixing up.\n",
2770 jib_size
, (u_int64_t
)jfork
.cf_extents
[0].blockCount
*vcb
->blockSize
);
2772 jib_size
= (u_int64_t
)jfork
.cf_extents
[0].blockCount
* vcb
->blockSize
;
2774 recreate_journal
= 1;
2777 if (jib_flags
& kJIJournalInFSMask
) {
2778 hfsmp
->jvp
= hfsmp
->hfs_devvp
;
2779 jib_offset
+= (off_t
)vcb
->hfsPlusIOPosOffset
;
2781 const char *dev_name
;
2784 dev_name
= vnode_getname_printable(devvp
);
2786 // since the journal is empty, just use any available external journal
2787 *((char *)&jibp
->ext_jnl_uuid
[0]) = '\0';
2789 // this fills in the uuid of the device we actually get
2790 hfsmp
->jvp
= open_journal_dev(dev_name
,
2791 !(jib_flags
& kJIJournalNeedInitMask
),
2792 (char *)&jibp
->ext_jnl_uuid
[0],
2793 (char *)&jibp
->machine_serial_num
[0],
2795 hfsmp
->hfs_logical_block_size
,
2797 if (hfsmp
->jvp
== NULL
) {
2798 buf_brelse(jinfo_bp
);
2799 vnode_putname_printable(dev_name
);
2802 if (IOBSDGetPlatformSerialNumber(&jibp
->machine_serial_num
[0], sizeof(jibp
->machine_serial_num
)) != KERN_SUCCESS
) {
2803 strlcpy(&jibp
->machine_serial_num
[0], "unknown-machine-serial-num", sizeof(jibp
->machine_serial_num
));
2807 recreate_journal
= 1;
2810 jib_flags
|= kJIJournalNeedInitMask
;
2812 vnode_putname_printable(dev_name
);
2815 // save this off for the hack-y check in hfs_remove()
2816 hfsmp
->jnl_start
= jib_offset
/ SWAP_BE32(vhp
->blockSize
);
2817 hfsmp
->jnl_size
= jib_size
;
2819 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) && (vfs_flags(hfsmp
->hfs_mp
) & MNT_ROOTFS
) == 0) {
2820 // if the file system is read-only, check if the journal is empty.
2821 // if it is, then we can allow the mount. otherwise we have to
2823 retval
= journal_is_clean(hfsmp
->jvp
,
2827 hfsmp
->hfs_logical_block_size
);
2831 buf_brelse(jinfo_bp
);
2834 const char *name
= vnode_getname_printable(devvp
);
2835 printf("hfs: late journal init: volume on %s is read-only and journal is dirty. Can not mount volume.\n",
2837 vnode_putname_printable(name
);
2843 if ((jib_flags
& kJIJournalNeedInitMask
) || recreate_journal
) {
2844 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2845 jib_offset
, jib_size
);
2846 hfsmp
->jnl
= journal_create(hfsmp
->jvp
,
2850 hfsmp
->hfs_logical_block_size
,
2853 hfs_sync_metadata
, hfsmp
->hfs_mp
,
2856 journal_trim_set_callback(hfsmp
->jnl
, hfs_trim_callback
, hfsmp
);
2858 // no need to start a transaction here... if this were to fail
2859 // we'd just re-init it on the next mount.
2860 jib_flags
&= ~kJIJournalNeedInitMask
;
2865 // if we weren't the last person to mount this volume
2866 // then we need to throw away the journal because it
2867 // is likely that someone else mucked with the disk.
2868 // if the journal is empty this is no big deal. if the
2869 // disk is dirty this prevents us from replaying the
2870 // journal over top of changes that someone else made.
2872 arg_flags
|= JOURNAL_RESET
;
2874 //printf("hfs: Opening the journal (joffset 0x%llx sz 0x%llx vhp_blksize %d)...\n",
2876 // jib_size, SWAP_BE32(vhp->blockSize));
2878 hfsmp
->jnl
= journal_open(hfsmp
->jvp
,
2882 hfsmp
->hfs_logical_block_size
,
2885 hfs_sync_metadata
, hfsmp
->hfs_mp
,
2888 journal_trim_set_callback(hfsmp
->jnl
, hfs_trim_callback
, hfsmp
);
2893 jibp
->flags
= SWAP_BE32(jib_flags
);
2894 jibp
->offset
= SWAP_BE64(jib_offset
);
2895 jibp
->size
= SWAP_BE64(jib_size
);
2897 buf_bwrite(jinfo_bp
);
2899 buf_brelse(jinfo_bp
);
2904 // if we expected the journal to be there and we couldn't
2905 // create it or open it then we have to bail out.
2906 if (hfsmp
->jnl
== NULL
) {
2907 printf("hfs: late jnl init: failed to open/create the journal (retval %d).\n", retval
);
2915 * Calculate the allocation zone for metadata.
2917 * This zone includes the following:
2918 * Allocation Bitmap file
2919 * Overflow Extents file
2922 * Clustered Hot files
2925 * METADATA ALLOCATION ZONE
2926 * ____________________________________________________________________________
2928 * | BM | JF | OEF | CATALOG |---> | HOT FILES |
2929 * |____|____|_____|_______________|______________________________|___________|
2931 * <------------------------------- N * 128 MB ------------------------------->
2934 #define GIGABYTE (u_int64_t)(1024*1024*1024)
2936 #define OVERFLOW_DEFAULT_SIZE (4*1024*1024)
2937 #define OVERFLOW_MAXIMUM_SIZE (128*1024*1024)
2938 #define JOURNAL_DEFAULT_SIZE (8*1024*1024)
2939 #define JOURNAL_MAXIMUM_SIZE (512*1024*1024)
2940 #define HOTBAND_MINIMUM_SIZE (10*1024*1024)
2941 #define HOTBAND_MAXIMUM_SIZE (512*1024*1024)
2943 /* Initialize the metadata zone.
2945 * If the size of the volume is less than the minimum size for
2946 * metadata zone, metadata zone is disabled.
2948 * If disable is true, disable metadata zone unconditionally.
2951 hfs_metadatazone_init(struct hfsmount
*hfsmp
, int disable
)
2959 int items
, really_do_it
=1;
2961 vcb
= HFSTOVCB(hfsmp
);
2962 fs_size
= (u_int64_t
)vcb
->blockSize
* (u_int64_t
)vcb
->allocLimit
;
2965 * For volumes less than 10 GB, don't bother.
2967 if (fs_size
< ((u_int64_t
)10 * GIGABYTE
)) {
2972 * Skip non-journaled volumes as well.
2974 if (hfsmp
->jnl
== NULL
) {
2978 /* If caller wants to disable metadata zone, do it */
2979 if (disable
== true) {
2984 * Start with space for the boot blocks and Volume Header.
2985 * 1536 = byte offset from start of volume to end of volume header:
2986 * 1024 bytes is the offset from the start of the volume to the
2987 * start of the volume header (defined by the volume format)
2988 * + 512 bytes (the size of the volume header).
2990 zonesize
= roundup(1536, hfsmp
->blockSize
);
2993 * Add the on-disk size of allocation bitmap.
2995 zonesize
+= hfsmp
->hfs_allocation_cp
->c_datafork
->ff_blocks
* hfsmp
->blockSize
;
2998 * Add space for the Journal Info Block and Journal (if they're in
2999 * this file system).
3001 if (hfsmp
->jnl
&& hfsmp
->jvp
== hfsmp
->hfs_devvp
) {
3002 zonesize
+= hfsmp
->blockSize
+ hfsmp
->jnl_size
;
3006 * Add the existing size of the Extents Overflow B-tree.
3007 * (It rarely grows, so don't bother reserving additional room for it.)
3009 zonesize
+= hfsmp
->hfs_extents_cp
->c_datafork
->ff_blocks
* hfsmp
->blockSize
;
3012 * If there is an Attributes B-tree, leave room for 11 clumps worth.
3013 * newfs_hfs allocates one clump, and leaves a gap of 10 clumps.
3014 * When installing a full OS install onto a 20GB volume, we use
3015 * 7 to 8 clumps worth of space (depending on packages), so that leaves
3016 * us with another 3 or 4 clumps worth before we need another extent.
3018 if (hfsmp
->hfs_attribute_cp
) {
3019 zonesize
+= 11 * hfsmp
->hfs_attribute_cp
->c_datafork
->ff_clumpsize
;
3023 * Leave room for 11 clumps of the Catalog B-tree.
3024 * Again, newfs_hfs allocates one clump plus a gap of 10 clumps.
3025 * When installing a full OS install onto a 20GB volume, we use
3026 * 7 to 8 clumps worth of space (depending on packages), so that leaves
3027 * us with another 3 or 4 clumps worth before we need another extent.
3029 zonesize
+= 11 * hfsmp
->hfs_catalog_cp
->c_datafork
->ff_clumpsize
;
3032 * Add space for hot file region.
3034 * ...for now, use 5 MB per 1 GB (0.5 %)
3036 filesize
= (fs_size
/ 1024) * 5;
3037 if (filesize
> HOTBAND_MAXIMUM_SIZE
)
3038 filesize
= HOTBAND_MAXIMUM_SIZE
;
3039 else if (filesize
< HOTBAND_MINIMUM_SIZE
)
3040 filesize
= HOTBAND_MINIMUM_SIZE
;
3042 * Calculate user quota file requirements.
3044 if (hfsmp
->hfs_flags
& HFS_QUOTAS
) {
3045 items
= QF_USERS_PER_GB
* (fs_size
/ GIGABYTE
);
3046 if (items
< QF_MIN_USERS
)
3047 items
= QF_MIN_USERS
;
3048 else if (items
> QF_MAX_USERS
)
3049 items
= QF_MAX_USERS
;
3050 if (!powerof2(items
)) {
3058 filesize
+= (items
+ 1) * sizeof(struct dqblk
);
3060 * Calculate group quota file requirements.
3063 items
= QF_GROUPS_PER_GB
* (fs_size
/ GIGABYTE
);
3064 if (items
< QF_MIN_GROUPS
)
3065 items
= QF_MIN_GROUPS
;
3066 else if (items
> QF_MAX_GROUPS
)
3067 items
= QF_MAX_GROUPS
;
3068 if (!powerof2(items
)) {
3076 filesize
+= (items
+ 1) * sizeof(struct dqblk
);
3078 zonesize
+= filesize
;
3081 * Round up entire zone to a bitmap block's worth.
3082 * The extra space goes to the catalog file and hot file area.
3085 zonesize
= roundup(zonesize
, (u_int64_t
)vcb
->vcbVBMIOSize
* 8 * vcb
->blockSize
);
3086 hfsmp
->hfs_min_alloc_start
= zonesize
/ vcb
->blockSize
;
3088 * If doing the round up for hfs_min_alloc_start would push us past
3089 * allocLimit, then just reset it back to 0. Though using a value
3090 * bigger than allocLimit would not cause damage in the block allocator
3091 * code, this value could get stored in the volume header and make it out
3092 * to disk, making the volume header technically corrupt.
3094 if (hfsmp
->hfs_min_alloc_start
>= hfsmp
->allocLimit
) {
3095 hfsmp
->hfs_min_alloc_start
= 0;
3098 if (really_do_it
== 0) {
3099 /* If metadata zone needs to be disabled because the
3100 * volume was truncated, clear the bit and zero out
3101 * the values that are no longer needed.
3103 if (hfsmp
->hfs_flags
& HFS_METADATA_ZONE
) {
3104 /* Disable metadata zone */
3105 hfsmp
->hfs_flags
&= ~HFS_METADATA_ZONE
;
3107 /* Zero out mount point values that are not required */
3108 hfsmp
->hfs_catalog_maxblks
= 0;
3109 hfsmp
->hfs_hotfile_maxblks
= 0;
3110 hfsmp
->hfs_hotfile_start
= 0;
3111 hfsmp
->hfs_hotfile_end
= 0;
3112 hfsmp
->hfs_hotfile_freeblks
= 0;
3113 hfsmp
->hfs_metazone_start
= 0;
3114 hfsmp
->hfs_metazone_end
= 0;
3120 temp
= zonesize
- temp
; /* temp has extra space */
3121 filesize
+= temp
/ 3;
3122 hfsmp
->hfs_catalog_maxblks
+= (temp
- (temp
/ 3)) / vcb
->blockSize
;
3124 hfsmp
->hfs_hotfile_maxblks
= filesize
/ vcb
->blockSize
;
3126 /* Convert to allocation blocks. */
3127 blk
= zonesize
/ vcb
->blockSize
;
3129 /* The default metadata zone location is at the start of volume. */
3130 hfsmp
->hfs_metazone_start
= 1;
3131 hfsmp
->hfs_metazone_end
= blk
- 1;
3133 /* The default hotfile area is at the end of the zone. */
3134 if (vfs_flags(HFSTOVFS(hfsmp
)) & MNT_ROOTFS
) {
3135 hfsmp
->hfs_hotfile_start
= blk
- (filesize
/ vcb
->blockSize
);
3136 hfsmp
->hfs_hotfile_end
= hfsmp
->hfs_metazone_end
;
3137 hfsmp
->hfs_hotfile_freeblks
= hfs_hotfile_freeblocks(hfsmp
);
3140 hfsmp
->hfs_hotfile_start
= 0;
3141 hfsmp
->hfs_hotfile_end
= 0;
3142 hfsmp
->hfs_hotfile_freeblks
= 0;
3145 printf("hfs: metadata zone is %d to %d\n", hfsmp
->hfs_metazone_start
, hfsmp
->hfs_metazone_end
);
3146 printf("hfs: hot file band is %d to %d\n", hfsmp
->hfs_hotfile_start
, hfsmp
->hfs_hotfile_end
);
3147 printf("hfs: hot file band free blocks = %d\n", hfsmp
->hfs_hotfile_freeblks
);
3149 hfsmp
->hfs_flags
|= HFS_METADATA_ZONE
;
3154 hfs_hotfile_freeblocks(struct hfsmount
*hfsmp
)
3156 ExtendedVCB
*vcb
= HFSTOVCB(hfsmp
);
3160 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
3161 freeblocks
= MetaZoneFreeBlocks(vcb
);
3162 hfs_systemfile_unlock(hfsmp
, lockflags
);
3164 /* Minus Extents overflow file reserve. */
3166 hfsmp
->hfs_overflow_maxblks
- VTOF(hfsmp
->hfs_extents_vp
)->ff_blocks
;
3167 /* Minus catalog file reserve. */
3169 hfsmp
->hfs_catalog_maxblks
- VTOF(hfsmp
->hfs_catalog_vp
)->ff_blocks
;
3173 return MIN(freeblocks
, hfsmp
->hfs_hotfile_maxblks
);
3177 * Determine if a file is a "virtual" metadata file.
3178 * This includes journal and quota files.
3181 hfs_virtualmetafile(struct cnode
*cp
)
3183 const char * filename
;
3186 if (cp
->c_parentcnid
!= kHFSRootFolderID
)
3189 filename
= (const char *)cp
->c_desc
.cd_nameptr
;
3190 if (filename
== NULL
)
3193 if ((strncmp(filename
, ".journal", sizeof(".journal")) == 0) ||
3194 (strncmp(filename
, ".journal_info_block", sizeof(".journal_info_block")) == 0) ||
3195 (strncmp(filename
, ".quota.user", sizeof(".quota.user")) == 0) ||
3196 (strncmp(filename
, ".quota.group", sizeof(".quota.group")) == 0) ||
3197 (strncmp(filename
, ".hotfiles.btree", sizeof(".hotfiles.btree")) == 0))
/* Acquire the syncer lock (backed by the mount mutex). */
void hfs_syncer_lock(struct hfsmount *hfsmp)
{
    hfs_lock_mount(hfsmp);
}
/* Release the syncer lock (backed by the mount mutex). */
void hfs_syncer_unlock(struct hfsmount *hfsmp)
{
    hfs_unlock_mount(hfsmp);
}
3216 void hfs_syncer_wait(struct hfsmount
*hfsmp
)
3218 msleep(&hfsmp
->hfs_sync_incomplete
, &hfsmp
->hfs_mutex
, PWAIT
,
3219 "hfs_syncer_wait", NULL
);
3223 void hfs_syncer_wakeup(struct hfsmount
*hfsmp
)
3225 wakeup(&hfsmp
->hfs_sync_incomplete
);
3229 uint64_t hfs_usecs_to_deadline(uint64_t usecs
)
3232 clock_interval_to_deadline(usecs
, NSEC_PER_USEC
, &deadline
);
3237 void hfs_syncer_queue(thread_call_t syncer
)
3239 if (thread_call_enter_delayed_with_leeway(syncer
,
3241 hfs_usecs_to_deadline(HFS_META_DELAY
),
3243 THREAD_CALL_DELAY_SYS_BACKGROUND
)) {
3244 printf("hfs: syncer already scheduled!\n");
3249 // Fire off a timed callback to sync the disk if the
3250 // volume is on ejectable media.
3254 hfs_sync_ejectable(struct hfsmount
*hfsmp
)
3256 // If we don't have a syncer or we get called by the syncer, just return
3257 if (!hfsmp
->hfs_syncer
|| current_thread() == hfsmp
->hfs_syncer_thread
)
3260 hfs_syncer_lock(hfsmp
);
3262 if (!timerisset(&hfsmp
->hfs_sync_req_oldest
))
3263 microuptime(&hfsmp
->hfs_sync_req_oldest
);
3265 /* If hfs_unmount is running, it will set hfs_syncer to NULL. Also we
3266 don't want to queue again if there is a sync outstanding. */
3267 if (!hfsmp
->hfs_syncer
|| hfsmp
->hfs_sync_incomplete
) {
3268 hfs_syncer_unlock(hfsmp
);
3272 hfsmp
->hfs_sync_incomplete
= TRUE
;
3274 thread_call_t syncer
= hfsmp
->hfs_syncer
;
3276 hfs_syncer_unlock(hfsmp
);
3278 hfs_syncer_queue(syncer
);
3282 hfs_start_transaction(struct hfsmount
*hfsmp
)
3284 int ret
= 0, unlock_on_err
= 0;
3285 thread_t thread
= current_thread();
3287 #ifdef HFS_CHECK_LOCK_ORDER
3289 * You cannot start a transaction while holding a system
3290 * file lock. (unless the transaction is nested.)
3292 if (hfsmp
->jnl
&& journal_owner(hfsmp
->jnl
) != thread
) {
3293 if (hfsmp
->hfs_catalog_cp
&& hfsmp
->hfs_catalog_cp
->c_lockowner
== thread
) {
3294 panic("hfs_start_transaction: bad lock order (cat before jnl)\n");
3296 if (hfsmp
->hfs_attribute_cp
&& hfsmp
->hfs_attribute_cp
->c_lockowner
== thread
) {
3297 panic("hfs_start_transaction: bad lock order (attr before jnl)\n");
3299 if (hfsmp
->hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== thread
) {
3300 panic("hfs_start_transaction: bad lock order (ext before jnl)\n");
3303 #endif /* HFS_CHECK_LOCK_ORDER */
3305 if (hfsmp
->jnl
== NULL
|| journal_owner(hfsmp
->jnl
) != thread
) {
3307 * The global lock should be held shared if journal is
3308 * active to prevent disabling. If we're not the owner
3309 * of the journal lock, verify that we're not already
3310 * holding the global lock exclusive before moving on.
3312 if (hfsmp
->hfs_global_lockowner
== thread
) {
3317 hfs_lock_global (hfsmp
, HFS_SHARED_LOCK
);
3318 OSAddAtomic(1, (SInt32
*)&hfsmp
->hfs_active_threads
);
3322 /* If a downgrade to read-only mount is in progress, no other
3323 * thread than the downgrade thread is allowed to modify
3326 if ((hfsmp
->hfs_flags
& HFS_RDONLY_DOWNGRADE
) &&
3327 hfsmp
->hfs_downgrading_thread
!= thread
) {
3333 ret
= journal_start_transaction(hfsmp
->jnl
);
3335 OSAddAtomic(1, &hfsmp
->hfs_global_lock_nesting
);
3342 if (ret
!= 0 && unlock_on_err
) {
3343 hfs_unlock_global (hfsmp
);
3344 OSAddAtomic(-1, (SInt32
*)&hfsmp
->hfs_active_threads
);
3351 hfs_end_transaction(struct hfsmount
*hfsmp
)
3353 int need_unlock
=0, ret
;
3355 if ((hfsmp
->jnl
== NULL
) || ( journal_owner(hfsmp
->jnl
) == current_thread()
3356 && (OSAddAtomic(-1, &hfsmp
->hfs_global_lock_nesting
) == 1)) ) {
3361 ret
= journal_end_transaction(hfsmp
->jnl
);
3367 OSAddAtomic(-1, (SInt32
*)&hfsmp
->hfs_active_threads
);
3368 hfs_unlock_global (hfsmp
);
3369 hfs_sync_ejectable(hfsmp
);
3377 hfs_journal_lock(struct hfsmount
*hfsmp
)
3379 /* Only peek at hfsmp->jnl while holding the global lock */
3380 hfs_lock_global (hfsmp
, HFS_SHARED_LOCK
);
3382 journal_lock(hfsmp
->jnl
);
3384 hfs_unlock_global (hfsmp
);
3388 hfs_journal_unlock(struct hfsmount
*hfsmp
)
3390 /* Only peek at hfsmp->jnl while holding the global lock */
3391 hfs_lock_global (hfsmp
, HFS_SHARED_LOCK
);
3393 journal_unlock(hfsmp
->jnl
);
3395 hfs_unlock_global (hfsmp
);
3399 * Flush the contents of the journal to the disk.
3403 * If TRUE, wait to write in-memory journal to the disk
3404 * consistently, and also wait to write all asynchronous
3405 * metadata blocks to its corresponding locations
3406 * consistently on the disk. This means that the journal
3407 * is empty at this point and does not contain any
3408 * transactions. This is overkill in normal scenarios
3409 * but is useful whenever the metadata blocks are required
3410 * to be consistent on-disk instead of just the journal
3411 * being consistent; like before live verification
3412 * and live volume resizing.
3414 * If FALSE, only wait to write in-memory journal to the
3415 * disk consistently. This means that the journal still
3416 * contains uncommitted transactions and the file system
3417 * metadata blocks in the journal transactions might be
3418 * written asynchronously to the disk. But there is no
3419 * guarantee that they are written to the disk before
3420 * returning to the caller. Note that this option is
3421 * sufficient for file system data integrity as it
3422 * guarantees consistent journal content on the disk.
3425 hfs_journal_flush(struct hfsmount
*hfsmp
, boolean_t wait_for_IO
)
3429 /* Only peek at hfsmp->jnl while holding the global lock */
3430 hfs_lock_global (hfsmp
, HFS_SHARED_LOCK
);
3432 ret
= journal_flush(hfsmp
->jnl
, wait_for_IO
);
3436 hfs_unlock_global (hfsmp
);
3443 * hfs_erase_unused_nodes
3445 * Check wheter a volume may suffer from unused Catalog B-tree nodes that
3446 * are not zeroed (due to <rdar://problem/6947811>). If so, just write
3447 * zeroes to the unused nodes.
3449 * How do we detect when a volume needs this repair? We can't always be
3450 * certain. If a volume was created after a certain date, then it may have
3451 * been created with the faulty newfs_hfs. Since newfs_hfs only created one
3452 * clump, we can assume that if a Catalog B-tree is larger than its clump size,
3453 * that means that the entire first clump must have been written to, which means
3454 * there shouldn't be unused and unwritten nodes in that first clump, and this
3455 * repair is not needed.
3457 * We have defined a bit in the Volume Header's attributes to indicate when the
3458 * unused nodes have been repaired. A newer newfs_hfs will set this bit.
3459 * As will fsck_hfs when it repairs the unused nodes.
3461 int hfs_erase_unused_nodes(struct hfsmount
*hfsmp
)
3464 struct filefork
*catalog
;
3467 if (hfsmp
->vcbAtrb
& kHFSUnusedNodeFixMask
)
3469 /* This volume has already been checked and repaired. */
3473 if ((hfsmp
->localCreateDate
< kHFSUnusedNodesFixDate
))
3475 /* This volume is too old to have had the problem. */
3476 hfsmp
->vcbAtrb
|= kHFSUnusedNodeFixMask
;
3480 catalog
= hfsmp
->hfs_catalog_cp
->c_datafork
;
3481 if (catalog
->ff_size
> catalog
->ff_clumpsize
)
3483 /* The entire first clump must have been in use at some point. */
3484 hfsmp
->vcbAtrb
|= kHFSUnusedNodeFixMask
;
3489 * If we get here, we need to zero out those unused nodes.
3491 * We start a transaction and lock the catalog since we're going to be
3492 * making on-disk changes. But note that BTZeroUnusedNodes doens't actually
3493 * do its writing via the journal, because that would be too much I/O
3494 * to fit in a transaction, and it's a pain to break it up into multiple
3495 * transactions. (It behaves more like growing a B-tree would.)
3497 printf("hfs_erase_unused_nodes: updating volume %s.\n", hfsmp
->vcbVN
);
3498 result
= hfs_start_transaction(hfsmp
);
3501 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
3502 result
= BTZeroUnusedNodes(catalog
);
3503 vnode_waitforwrites(hfsmp
->hfs_catalog_vp
, 0, 0, 0, "hfs_erase_unused_nodes");
3504 hfs_systemfile_unlock(hfsmp
, lockflags
);
3505 hfs_end_transaction(hfsmp
);
3507 hfsmp
->vcbAtrb
|= kHFSUnusedNodeFixMask
;
3508 printf("hfs_erase_unused_nodes: done updating volume %s.\n", hfsmp
->vcbVN
);
3515 extern time_t snapshot_timestamp
;
3518 check_for_tracked_file(struct vnode
*vp
, time_t ctime
, uint64_t op_type
, void *arg
)
3520 int snapshot_error
= 0;
3526 /* Swap files are special; skip them */
3527 if (vnode_isswap(vp
)) {
3531 if (ctime
!= 0 && snapshot_timestamp
!= 0 && (ctime
<= snapshot_timestamp
|| vnode_needssnapshots(vp
))) {
3532 // the change time is within this epoch
3535 error
= resolve_nspace_item_ext(vp
, op_type
| NAMESPACE_HANDLER_SNAPSHOT_EVENT
, arg
);
3536 if (error
== EDEADLK
) {
3539 if (error
== EAGAIN
) {
3540 printf("hfs: cow-snapshot: timed out waiting for namespace handler...\n");
3541 } else if (error
== EINTR
) {
3542 // printf("hfs: cow-snapshot: got a signal while waiting for namespace handler...\n");
3543 snapshot_error
= EINTR
;
3548 if (snapshot_error
) return snapshot_error
;
3554 check_for_dataless_file(struct vnode
*vp
, uint64_t op_type
)
3558 if (vp
== NULL
|| (VTOC(vp
)->c_bsdflags
& UF_COMPRESSED
) == 0 || VTOCMP(vp
) == NULL
|| VTOCMP(vp
)->cmp_type
!= DATALESS_CMPFS_TYPE
) {
3559 // there's nothing to do, it's not dataless
3563 /* Swap files are special; ignore them */
3564 if (vnode_isswap(vp
)) {
3568 // printf("hfs: dataless: encountered a file with the dataless bit set! (vp %p)\n", vp);
3569 error
= resolve_nspace_item(vp
, op_type
| NAMESPACE_HANDLER_NSPACE_EVENT
);
3570 if (error
== EDEADLK
&& op_type
== NAMESPACE_HANDLER_WRITE_OP
) {
3573 if (error
== EAGAIN
) {
3574 printf("hfs: dataless: timed out waiting for namespace handler...\n");
3575 // XXXdbg - return the fabled ENOTPRESENT (i.e. EJUKEBOX)?
3577 } else if (error
== EINTR
) {
3578 // printf("hfs: dataless: got a signal while waiting for namespace handler...\n");
3581 } else if (VTOC(vp
)->c_bsdflags
& UF_COMPRESSED
) {
3583 // if we're here, the dataless bit is still set on the file
3584 // which means it didn't get handled. we return an error
3585 // but it's presently ignored by all callers of this function.
3587 // XXXdbg - EDATANOTPRESENT is what we really need...
3597 // NOTE: this function takes care of starting a transaction and
3598 // acquiring the systemfile lock so that it can call
3601 // NOTE: do NOT hold and cnode locks while calling this function
3602 // to avoid deadlocks (because we take a lock on the root
3606 hfs_generate_document_id(struct hfsmount
*hfsmp
, uint32_t *docid
)
3612 error
= VFS_ROOT(HFSTOVFS(hfsmp
), &rvp
, vfs_context_kernel());
3618 if ((error
= hfs_lock(cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
)) != 0) {
3621 struct FndrExtendedDirInfo
*extinfo
= (struct FndrExtendedDirInfo
*)((void *)((char *)&cp
->c_attr
.ca_finderinfo
+ 16));
3624 if (hfs_start_transaction(hfsmp
) != 0) {
3627 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
3629 if (extinfo
->document_id
== 0) {
3630 // initialize this to start at 3 (one greater than the root-dir id)
3631 extinfo
->document_id
= 3;
3634 *docid
= extinfo
->document_id
++;
3636 // mark the root cnode dirty
3637 cp
->c_flag
|= C_MODIFIED
| C_FORCEUPDATE
;
3638 (void) cat_update(hfsmp
, &cp
->c_desc
, &cp
->c_attr
, NULL
, NULL
);
3640 hfs_systemfile_unlock (hfsmp
, lockflags
);
3641 (void) hfs_end_transaction(hfsmp
);
3643 (void) hfs_unlock(cp
);
3653 * Return information about number of file system allocation blocks
3654 * taken by metadata on a volume.
3656 * This function populates struct hfsinfo_metadata with allocation blocks
3657 * used by extents overflow btree, catalog btree, bitmap, attribute btree,
3658 * journal file, and sum of all of the above.
3661 hfs_getinfo_metadata_blocks(struct hfsmount
*hfsmp
, struct hfsinfo_metadata
*hinfo
)
3664 int ret_lockflags
= 0;
3666 /* Zero out the output buffer */
3667 bzero(hinfo
, sizeof(struct hfsinfo_metadata
));
3670 * Getting number of allocation blocks for all btrees
3671 * should be a quick operation, so we grab locks for
3672 * all of them at the same time
3674 lockflags
= SFL_CATALOG
| SFL_EXTENTS
| SFL_BITMAP
| SFL_ATTRIBUTE
;
3675 ret_lockflags
= hfs_systemfile_lock(hfsmp
, lockflags
, HFS_EXCLUSIVE_LOCK
);
3677 * Make sure that we were able to acquire all locks requested
3678 * to protect us against conditions like unmount in progress.
3680 if ((lockflags
& ret_lockflags
) != lockflags
) {
3681 /* Release any locks that were acquired */
3682 hfs_systemfile_unlock(hfsmp
, ret_lockflags
);
3686 /* Get information about all the btrees */
3687 hinfo
->extents
= hfsmp
->hfs_extents_cp
->c_datafork
->ff_blocks
;
3688 hinfo
->catalog
= hfsmp
->hfs_catalog_cp
->c_datafork
->ff_blocks
;
3689 hinfo
->allocation
= hfsmp
->hfs_allocation_cp
->c_datafork
->ff_blocks
;
3690 hinfo
->attribute
= hfsmp
->hfs_attribute_cp
->c_datafork
->ff_blocks
;
3692 /* Done with btrees, give up the locks */
3693 hfs_systemfile_unlock(hfsmp
, ret_lockflags
);
3695 /* Get information about journal file */
3696 hinfo
->journal
= howmany(hfsmp
->jnl_size
, hfsmp
->blockSize
);
3698 /* Calculate total number of metadata blocks */
3699 hinfo
->total
= hinfo
->extents
+ hinfo
->catalog
+
3700 hinfo
->allocation
+ hinfo
->attribute
+
3707 hfs_freezewrite_callback(struct vnode
*vp
, __unused
void *cargs
)
3709 vnode_waitforwrites(vp
, 0, 0, 0, "hfs freeze 8");
3715 int hfs_freeze(struct hfsmount
*hfsmp
)
3717 // First make sure some other process isn't freezing
3718 hfs_lock_mount(hfsmp
);
3719 while (hfsmp
->hfs_freeze_state
!= HFS_THAWED
) {
3720 if (msleep(&hfsmp
->hfs_freeze_state
, &hfsmp
->hfs_mutex
,
3721 PWAIT
| PCATCH
, "hfs freeze 1", NULL
) == EINTR
) {
3722 hfs_unlock_mount(hfsmp
);
3727 // Stop new syncers from starting
3728 hfsmp
->hfs_freeze_state
= HFS_WANT_TO_FREEZE
;
3730 // Now wait for all syncers to finish
3731 while (hfsmp
->hfs_syncers
) {
3732 if (msleep(&hfsmp
->hfs_freeze_state
, &hfsmp
->hfs_mutex
,
3733 PWAIT
| PCATCH
, "hfs freeze 2", NULL
) == EINTR
) {
3734 hfs_thaw_locked(hfsmp
);
3735 hfs_unlock_mount(hfsmp
);
3739 hfs_unlock_mount(hfsmp
);
3741 // flush things before we get started to try and prevent
3742 // dirty data from being paged out while we're frozen.
3743 // note: we can't do this once we're in the freezing state because
3744 // other threads will need to take the global lock
3745 vnode_iterate(hfsmp
->hfs_mp
, 0, hfs_freezewrite_callback
, NULL
);
3747 // Block everything in hfs_lock_global now
3748 hfs_lock_mount(hfsmp
);
3749 hfsmp
->hfs_freeze_state
= HFS_FREEZING
;
3750 hfsmp
->hfs_freezing_thread
= current_thread();
3751 hfs_unlock_mount(hfsmp
);
3753 /* Take the exclusive lock to flush out anything else that
3754 might have the global lock at the moment and also so we
3755 can flush the journal. */
3756 hfs_lock_global(hfsmp
, HFS_EXCLUSIVE_LOCK
);
3757 journal_flush(hfsmp
->jnl
, TRUE
);
3758 hfs_unlock_global(hfsmp
);
3760 // don't need to iterate on all vnodes, we just need to
3761 // wait for writes to the system files and the device vnode
3763 // Now that journal flush waits for all metadata blocks to
3764 // be written out, waiting for btree writes is probably no
3766 if (HFSTOVCB(hfsmp
)->extentsRefNum
)
3767 vnode_waitforwrites(HFSTOVCB(hfsmp
)->extentsRefNum
, 0, 0, 0, "hfs freeze 3");
3768 if (HFSTOVCB(hfsmp
)->catalogRefNum
)
3769 vnode_waitforwrites(HFSTOVCB(hfsmp
)->catalogRefNum
, 0, 0, 0, "hfs freeze 4");
3770 if (HFSTOVCB(hfsmp
)->allocationsRefNum
)
3771 vnode_waitforwrites(HFSTOVCB(hfsmp
)->allocationsRefNum
, 0, 0, 0, "hfs freeze 5");
3772 if (hfsmp
->hfs_attribute_vp
)
3773 vnode_waitforwrites(hfsmp
->hfs_attribute_vp
, 0, 0, 0, "hfs freeze 6");
3774 vnode_waitforwrites(hfsmp
->hfs_devvp
, 0, 0, 0, "hfs freeze 7");
3776 // We're done, mark frozen
3777 hfs_lock_mount(hfsmp
);
3778 hfsmp
->hfs_freeze_state
= HFS_FROZEN
;
3779 hfsmp
->hfs_freezing_proc
= current_proc();
3780 hfs_unlock_mount(hfsmp
);
3786 int hfs_thaw(struct hfsmount
*hfsmp
, const struct proc
*process
)
3788 hfs_lock_mount(hfsmp
);
3790 if (hfsmp
->hfs_freeze_state
!= HFS_FROZEN
) {
3791 hfs_unlock_mount(hfsmp
);
3794 if (process
&& hfsmp
->hfs_freezing_proc
!= process
) {
3795 hfs_unlock_mount(hfsmp
);
3799 hfs_thaw_locked(hfsmp
);
3801 hfs_unlock_mount(hfsmp
);
3806 static void hfs_thaw_locked(struct hfsmount
*hfsmp
)
3808 hfsmp
->hfs_freezing_proc
= NULL
;
3809 hfsmp
->hfs_freeze_state
= HFS_THAWED
;
3811 wakeup(&hfsmp
->hfs_freeze_state
);