/*
 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	derived from @(#)ufs_vfsops.c	8.8 (Berkeley) 5/20/95
 */
/*
 * (c) Copyright 1997-2002 Apple Inc.  All rights reserved.
 *
 * hfs_vfsops.c -- VFS layer for loadable HFS file system.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kauth.h>

#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/quota.h>
#include <sys/paths.h>
#include <sys/utfconv.h>
#include <sys/kdebug.h>
#include <sys/fslog.h>

#include <libkern/OSKextLib.h>
#include <libkern/OSAtomic.h>

/* for parsing boot-args */
#include <pexpert/pexpert.h>

#include <kern/locks.h>

#include "hfs_journal.h"

#include <miscfs/specfs/specdev.h>
#include "hfs_mount.h"

#include <libkern/crypto/md5.h>
#include <uuid/uuid.h>

#include "hfs_iokit.h"
#include "hfs_catalog.h"
#include "hfs_cnode.h"
#include "hfs_endian.h"
#include "hfs_hotfiles.h"
#include "hfs_quota.h"
#include "hfs_btreeio.h"
#include "hfs_kdebug.h"
#include "hfs_cprotect.h"

#include "FileMgrInternal.h"
#include "BTreesInternal.h"
#define HFS_MOUNT_DEBUG 1

/* Enable/disable debugging code for live volume resizing, defined in hfs_resize.c */
extern int hfs_resize_debug;

lck_grp_attr_t *  hfs_group_attr;
lck_attr_t *  hfs_lock_attr;
lck_grp_t *  hfs_mutex_group;
lck_grp_t *  hfs_rwlock_group;
lck_grp_t *  hfs_spinlock_group;

// variables to manage HFS kext retain count -- only supported on Macs
int hfs_active_mounts = 0;

extern struct vnodeopv_desc hfs_vnodeop_opv_desc;

extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc;
static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);

/* not static so we can re-use in hfs_readwrite.c for vn_getpath_ext calls */
int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);

static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
static int hfs_flushfiles(struct mount *, int, struct proc *);
static int hfs_init(struct vfsconf *vfsp);
static void hfs_locks_destroy(struct hfsmount *hfsmp);
static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
static void hfs_syncer_free(struct hfsmount *hfsmp);

void hfs_initialize_allocator (struct hfsmount *hfsmp);
int hfs_teardown_allocator (struct hfsmount *hfsmp);

int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context);
int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
                int journal_replay_only, vfs_context_t context);
int hfs_reload(struct mount *mp);
int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
               user_addr_t newp, size_t newlen, vfs_context_t context);
int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);

static int hfs_journal_replay(vnode_t devvp, vfs_context_t context);
#include <IOKit/IOLib.h>

hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
#warning HFS_LEAK_DEBUG is on
	hfs_alloc_trace_enable();

	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp = NULL;
	struct hfs_mount_args args;
	if (data && (retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
		if (HFS_MOUNT_DEBUG) {
			printf("hfs_mount: copyin returned %d for fs\n", retval);

	cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
	if (cmdflags & MNT_UPDATE) {
		hfsmp = VFSTOHFS(mp);

		/* Reload incore data after an fsck. */
		if (cmdflags & MNT_RELOAD) {
			if (vfs_isrdonly(mp)) {
				int error = hfs_reload(mp);
				if (error && HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_reload returned %d on %s \n", error, hfsmp->vcbVN);
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp->vcbVN);

		/* Change to a read-only file system. */
		if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&

			/* Set flag to indicate that a downgrade to read-only
			 * is in progress and therefore block any further
			 * modifications to the file system.
			 */
			hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
			hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
			hfsmp->hfs_downgrading_thread = current_thread();
			hfs_unlock_global (hfsmp);
			hfs_syncer_free(hfsmp);

			/* use hfs_sync to push out System (btree) files */
			retval = hfs_sync(mp, MNT_WAIT, context);
			if (retval && ((cmdflags & MNT_FORCE) == 0)) {
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval, hfsmp->vcbVN);

			if (cmdflags & MNT_FORCE)

			if ((retval = hfs_flushfiles(mp, flags, p))) {
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval, hfsmp->vcbVN);

			/* mark the volume cleanly unmounted */
			hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
			retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			hfsmp->hfs_flags |= HFS_READ_ONLY;

			/*
			 * Close down the journal.
			 *
			 * NOTE: It is critically important to close down the journal
			 * and have it issue all pending I/O prior to calling VNOP_FSYNC below.
			 * In a journaled environment it is expected that the journal be
			 * the only actor permitted to issue I/O for metadata blocks in HFS.
			 * If we were to call VNOP_FSYNC prior to closing down the journal,
			 * we would inadvertently issue (and wait for) the I/O we just
			 * initiated above as part of the flushvolumeheader call.
			 *
			 * To avoid this, we follow the same order of operations as in
			 * unmount and issue the journal_close prior to calling VNOP_FSYNC.
			 */
			hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
			journal_close(hfsmp->jnl);

			// Note: we explicitly don't want to shutdown
			//       access to the jvp because we may need
			//       it later if we go back to being read-write.

			hfs_unlock_global (hfsmp);
			vfs_clearflags(hfsmp->hfs_mp, MNT_JOURNALED);

			/*
			 * Write out any pending I/O still outstanding against the device node
			 * now that the journal has been closed.
			 */
			vnode_get(hfsmp->hfs_devvp);
			retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
			vnode_put(hfsmp->hfs_devvp);

			if (HFS_MOUNT_DEBUG) {
				printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval, hfsmp->vcbVN);
			hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
			hfsmp->hfs_downgrading_thread = NULL;
			hfsmp->hfs_flags &= ~HFS_READ_ONLY;

			if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
				if (hfsmp->hfs_summary_table) {
					/*
					 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
					 */
					if (hfsmp->hfs_allocation_vp) {
						err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
					hfs_free(hfsmp->hfs_summary_table, hfsmp->hfs_summary_bytes);
					hfsmp->hfs_summary_table = NULL;
					hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;
					if (err == 0 && hfsmp->hfs_allocation_vp){
						hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));

			hfsmp->hfs_downgrading_thread = NULL;
		/* Change to a writable file system. */
		if (vfs_iswriteupgrade(mp)) {
			/*
			 * On inconsistent disks, do not allow read-write mount
			 * unless it is the boot volume being mounted.
			 */
			if (!(vfs_flags(mp) & MNT_ROOTFS) &&
					(hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n", (hfsmp->vcbVN));

			// If the journal was shut-down previously because we were
			// asked to be read-only, let's start it back up again now

			if (   (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
				&& hfsmp->jnl == NULL
				&& hfsmp->jvp != NULL) {

				if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
					jflags = JOURNAL_RESET;

				hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);

				/* We provide the mount point twice here: The first is used as
				 * an opaque argument to be passed back when hfs_sync_metadata
				 * is called.  The second is provided to the throttling code to
				 * indicate which mount's device should be used when accounting
				 * for metadata writes.
				 */
				hfsmp->jnl = journal_open(hfsmp->jvp,
						hfs_blk_to_bytes(hfsmp->jnl_start, HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
						hfsmp->hfs_logical_block_size,
						hfs_sync_metadata, hfsmp->hfs_mp,

				/*
				 * Set up the trim callback function so that we can add
				 * recently freed extents to the free extent cache once
				 * the transaction that freed them is written to the
				 */
				journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);

				hfs_unlock_global (hfsmp);

				if (hfsmp->jnl == NULL) {
					if (HFS_MOUNT_DEBUG) {
						printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp->vcbVN));

				hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
				vfs_setflags(hfsmp->hfs_mp, MNT_JOURNALED);

			/* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
			retval = hfs_erase_unused_nodes(hfsmp);
			if (retval != E_NONE) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval, hfsmp->vcbVN);

			/* If this mount point was downgraded from read-write
			 * to read-only, clear that information as we are now
			 * moving back to read-write.
			 */
			hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
			hfsmp->hfs_downgrading_thread = NULL;

			/* mark the volume dirty (clear clean unmount bit) */
			hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;

			retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			if (retval != E_NONE) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval, hfsmp->vcbVN);

			/* Only clear HFS_READ_ONLY after a successful write */
			hfsmp->hfs_flags &= ~HFS_READ_ONLY;

			if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
				/* Setup private/hidden directories for hardlinks. */
				hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
				hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

				hfs_remove_orphans(hfsmp);

				/*
				 * Since we're upgrading to a read-write mount, allow
				 * hot file clustering if conditions allow.
				 *
				 * Note: this normally only would happen if you booted
				 *       single-user and upgraded the mount to read-write
				 *
				 * Note: at this point we are not allowed to fail the
				 *       mount operation because the HotFile init code
				 *       in hfs_recording_init() will lookup vnodes with
				 *       VNOP_LOOKUP() which hangs vnodes off the mount
				 *       (and if we were to fail, VFS is not prepared to
				 *       clean that up at this point.  Since HotFiles are
				 *       optional, this is not a big deal.
				 */
				if (ISSET(hfsmp->hfs_flags, HFS_METADATA_ZONE)
					&& (!ISSET(hfsmp->hfs_flags, HFS_SSD)
						|| ISSET(hfsmp->hfs_flags, HFS_CS_HOTFILE_PIN))) {
					hfs_recording_init(hfsmp);

				/* Force ACLs on HFS+ file systems. */
				if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
					vfs_setextendedsecurity(HFSTOVFS(hfsmp));

		/* Update file system parameters. */
		retval = hfs_changefs(mp, &args);
		if (retval &&  HFS_MOUNT_DEBUG) {
			printf("hfs_mount: hfs_changefs returned %d for %s\n", retval, hfsmp->vcbVN);
	} else /* not an update request */ {
		/* Set the mount flag to indicate that we support volfs  */
		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));

		retval = hfs_mountfs(devvp, mp, data ? &args : NULL, 0, context);
			const char *name = vnode_getname(devvp);
			printf("hfs_mount: hfs_mountfs returned error=%d for device %s\n", retval, (name ? name : "unknown-dev"));

		/* After hfs_mountfs succeeds, we should have valid hfsmp */
		hfsmp = VFSTOHFS(mp);

		/* Set up the maximum defrag file size */
		hfsmp->hfs_defrag_max = HFS_INITIAL_DEFRAG_SIZE;

			hfsmp->hfs_uid = UNKNOWNUID;
			hfsmp->hfs_gid = UNKNOWNGID;
			hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
			hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */

		/* Establish the free block reserve. */
		hfsmp->reserveBlocks = ((u_int64_t)hfsmp->totalBlocks * HFS_MINFREE) / 100;
		hfsmp->reserveBlocks = MIN(hfsmp->reserveBlocks, HFS_MAXRESERVE / hfsmp->blockSize);
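
		/*
		 * Illustrative worked example (not part of the original source; it
		 * assumes HFS_MINFREE is 1 percent and HFS_MAXRESERVE is 250 MB, as
		 * defined elsewhere in the HFS headers): on a volume with 4 KiB
		 * allocation blocks and 50,000,000 total blocks (~190 GiB), the
		 * first line computes 50,000,000 * 1 / 100 = 500,000 blocks, and the
		 * second clamps it to 250 MB / 4 KiB = 64,000 blocks, so the final
		 * reserveBlocks would be 64,000.
		 */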
		// increment kext retain count
		OSIncrementAtomic(&hfs_active_mounts);
		OSKextRetainKextWithLoadTag(OSKextGetCurrentLoadTag());
		if (hfs_active_mounts <= 0 && panic_on_assert)
			panic("hfs_mount: error - kext resource count is non-positive: %d but at least one active mount\n", hfs_active_mounts);

	(void)hfs_statfs(mp, vfs_statfs(mp), context);
struct hfs_changefs_cargs {
	struct hfsmount *hfsmp;

hfs_changefs_callback(struct vnode *vp, void *cargs)
	struct cat_desc cndesc;
	struct cat_attr cnattr;
	struct hfs_changefs_cargs *args;

	args = (struct hfs_changefs_cargs *)cargs;

	vcb = HFSTOVCB(args->hfsmp);

	lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
	error = cat_lookup(args->hfsmp, &cp->c_desc, 0, 0, &cndesc, &cnattr, NULL, NULL);
	hfs_systemfile_unlock(args->hfsmp, lockflags);

	/*
	 * If we couldn't find this guy skip to the next one
	 */
		return (VNODE_RETURNED);

	/*
	 * Get the real uid/gid and perm mask from disk.
	 */
	if (args->permswitch || args->permfix) {
		cp->c_uid = cnattr.ca_uid;
		cp->c_gid = cnattr.ca_gid;
		cp->c_mode = cnattr.ca_mode;

	/*
	 * If we're switching name converters then...
	 *   Remove the existing entry from the namei cache.
	 *   Update name to one based on new encoder.
	 */
		replace_desc(cp, &cndesc);

		if (cndesc.cd_cnid == kHFSRootFolderID) {
			strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
			cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;

	cat_releasedesc(&cndesc);

	return (VNODE_RETURNED);
/* Change fs mount parameters */
hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
	int namefix, permfix, permswitch;
	struct hfsmount *hfsmp;
	struct hfs_changefs_cargs cargs;
	u_int32_t mount_flags;
	u_int32_t old_encoding = 0;
	hfs_to_unicode_func_t	get_unicode_func;
	unicode_to_hfs_func_t	get_hfsname_func = NULL;

	hfsmp = VFSTOHFS(mp);
	vcb = HFSTOVCB(hfsmp);
	mount_flags = (unsigned int)vfs_flags(mp);

	hfsmp->hfs_flags |= HFS_IN_CHANGEFS;

	permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
	               ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
	              (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
	               (mount_flags & MNT_UNKNOWNPERMISSIONS)));

	/* The root filesystem must operate with actual permissions: */
	if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
		vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS));	/* Just say "No". */

	if (mount_flags & MNT_UNKNOWNPERMISSIONS)
		hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
		hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;

	namefix = permfix = 0;

	/*
	 * Tracking of hot files requires up-to-date access times.  So if
	 * access time updates are disabled, we must also disable hot files.
	 */
	if (mount_flags & MNT_NOATIME) {
		(void) hfs_recording_suspend(hfsmp);

	/* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
	if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
		gTimeZone = args->hfs_timezone;

	/* Change the default uid, gid and/or mask */
	if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
		hfsmp->hfs_uid = args->hfs_uid;
		if (vcb->vcbSigWord == kHFSPlusSigWord)

	if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
		hfsmp->hfs_gid = args->hfs_gid;
		if (vcb->vcbSigWord == kHFSPlusSigWord)

	if (args->hfs_mask != (mode_t)VNOVAL) {
		if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
			hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
			hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
			if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
				hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
			if (vcb->vcbSigWord == kHFSPlusSigWord)

	/* Change the hfs encoding value (hfs only) */
	if ((vcb->vcbSigWord == kHFSSigWord) &&
	    (args->hfs_encoding != (u_int32_t)VNOVAL) &&
	    (hfsmp->hfs_encoding != args->hfs_encoding)) {

		retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);

		/*
		 * Connect the new hfs_get_unicode converter but leave
		 * the old hfs_get_hfsname converter in place so that
		 * we can lookup existing vnodes to get their correctly
		 *
		 * When we're all finished, we can then connect the new
		 * hfs_get_hfsname converter and release our interest
		 * in the old converters.
		 */
		hfsmp->hfs_get_unicode = get_unicode_func;
		old_encoding = hfsmp->hfs_encoding;
		hfsmp->hfs_encoding = args->hfs_encoding;

	if (!(namefix || permfix || permswitch))

	/* XXX 3762912 hack to support HFS filesystem 'owner' */
		hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
		hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);

	/*
	 * For each active vnode fix things that changed
	 *
	 * Note that we can visit a vnode more than once
	 * and we can race with fsync.
	 *
	 * hfs_changefs_callback will be called for each vnode
	 * hung off of this mount point
	 *
	 * The vnode will be properly referenced and unreferenced
	 * around the callback
	 */
	cargs.namefix = namefix;
	cargs.permfix = permfix;
	cargs.permswitch = permswitch;

	vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);

	/*
	 * If we're switching name converters we can now
	 * connect the new hfs_get_hfsname converter and
	 * release our interest in the old converters.
	 */
		/* HFS standard only */
		hfsmp->hfs_get_hfsname = get_hfsname_func;
		vcb->volumeNameEncodingHint = args->hfs_encoding;
		(void) hfs_relconverter(old_encoding);

	hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
struct hfs_reload_cargs {
	struct hfsmount *hfsmp;

hfs_reload_callback(struct vnode *vp, void *cargs)
	struct hfs_reload_cargs *args;

	args = (struct hfs_reload_cargs *)cargs;

	/*
	 * flush all the buffers associated with this node
	 */
	(void) buf_invalidateblks(vp, 0, 0, 0);

	/*
	 * Remove any directory hints
	 */
		hfs_reldirhints(cp, 0);

	/*
	 * Re-read cnode data for all active vnodes (non-metadata files).
	 */
	if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp) && (cp->c_fileid >= kHFSFirstUserCatalogNodeID)) {
		struct cat_fork *datafork;
		struct cat_desc desc;

		datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;

		/* lookup by fileID since name could have changed */
		lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
		args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, 0, &desc, &cp->c_attr, datafork);
		hfs_systemfile_unlock(args->hfsmp, lockflags);
			return (VNODE_RETURNED_DONE);

		/* update cnode's catalog descriptor */
		(void) replace_desc(cp, &desc);

	return (VNODE_RETURNED);
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	invalidate all cached meta-data.
 *	invalidate all inactive vnodes.
 *	invalidate all cached file data.
 *	re-read volume header from disk.
 *	re-load meta-file info (extents, file size).
 *	re-load B-tree header data.
 *	re-read cnode data for all active vnodes.
 */
hfs_reload(struct mount *mountp)
	register struct vnode *devvp;
	struct hfsmount *hfsmp;
	struct HFSPlusVolumeHeader *vhp;
	struct filefork *forkp;
	struct cat_desc cndesc;
	struct hfs_reload_cargs args;
	daddr64_t priIDSector;

	hfsmp = VFSTOHFS(mountp);
	vcb = HFSTOVCB(hfsmp);

	if (vcb->vcbSigWord == kHFSSigWord)
		return (EINVAL);	/* rooting from HFS is not supported! */

	/*
	 * Invalidate all cached meta-data.
	 */
	devvp = hfsmp->hfs_devvp;
	if (buf_invalidateblks(devvp, 0, 0, 0))
		panic("hfs_reload: dirty1");

	/*
	 * hfs_reload_callback will be called for each vnode
	 * hung off of this mount point that can't be recycled...
	 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
	 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
	 * properly referenced and unreferenced around the callback
	 */
	vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);

	/*
	 * Re-read VolumeHeader from disk.
	 */
	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
			HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
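
	/*
	 * Illustrative note (sketch, not from the original source): the HFS+
	 * primary volume header sits at a fixed 1024-byte offset from the start
	 * of the (possibly embedded) volume, and HFS_PRI_SECTOR() converts that
	 * byte offset into a logical-block index.  Assuming 512-byte logical
	 * blocks and no embedding offset (hfsPlusIOPosOffset == 0), priIDSector
	 * would work out to 1024 / 512 = block 2.
	 */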
	error = (int)buf_meta_bread(hfsmp->hfs_devvp,
			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
			hfsmp->hfs_physical_block_size, NOCRED, &bp);

	vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));

	/* Do a quick sanity check */
	if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
	     SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
	    (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
	     SWAP_BE16(vhp->version) != kHFSXVersion) ||
	    SWAP_BE32(vhp->blockSize) != vcb->blockSize) {

	vcb->vcbLsMod		= to_bsd_time(SWAP_BE32(vhp->modifyDate));
	vcb->vcbAtrb		= SWAP_BE32 (vhp->attributes);
	vcb->vcbJinfoBlock	= SWAP_BE32(vhp->journalInfoBlock);
	vcb->vcbClpSiz		= SWAP_BE32 (vhp->rsrcClumpSize);
	vcb->vcbNxtCNID		= SWAP_BE32 (vhp->nextCatalogID);
	vcb->vcbVolBkUp		= to_bsd_time(SWAP_BE32(vhp->backupDate));
	vcb->vcbWrCnt		= SWAP_BE32 (vhp->writeCount);
	vcb->vcbFilCnt		= SWAP_BE32 (vhp->fileCount);
	vcb->vcbDirCnt		= SWAP_BE32 (vhp->folderCount);
	HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
	vcb->totalBlocks	= SWAP_BE32 (vhp->totalBlocks);
	vcb->freeBlocks		= SWAP_BE32 (vhp->freeBlocks);
	vcb->encodingsBitmap	= SWAP_BE64 (vhp->encodingsBitmap);
	bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
	vcb->localCreateDate	= SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */

	/*
	 * Re-load meta-file vnode data (extent info, file size, etc).
	 */
	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
	forkp->ff_size      = SWAP_BE64 (vhp->extentsFile.logicalSize);
	forkp->ff_blocks    = SWAP_BE32 (vhp->extentsFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);

	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
	forkp->ff_size      = SWAP_BE64 (vhp->catalogFile.logicalSize);
	forkp->ff_blocks    = SWAP_BE32 (vhp->catalogFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);

	if (hfsmp->hfs_attribute_vp) {
		forkp = VTOF(hfsmp->hfs_attribute_vp);
		for (i = 0; i < kHFSPlusExtentDensity; i++) {
			forkp->ff_extents[i].startBlock =
				SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
			forkp->ff_extents[i].blockCount =
				SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
		forkp->ff_size      = SWAP_BE64 (vhp->attributesFile.logicalSize);
		forkp->ff_blocks    = SWAP_BE32 (vhp->attributesFile.totalBlocks);
		forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);

	forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
	forkp->ff_size      = SWAP_BE64 (vhp->allocationFile.logicalSize);
	forkp->ff_blocks    = SWAP_BE32 (vhp->allocationFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);

	/*
	 * Re-load B-tree header data
	 */
	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )

	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )

	if (hfsmp->hfs_attribute_vp) {
		forkp = VTOF(hfsmp->hfs_attribute_vp);
		if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )

	/* Reload the volume name */
	if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, NULL, NULL)))

	vcb->volumeNameEncodingHint = cndesc.cd_encoding;
	bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
	cat_releasedesc(&cndesc);

	/* Re-establish private/hidden directories. */
	hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
	hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

	/* In case any volume information changed to trigger a notification */
	hfs_generate_volume_notifications(hfsmp);
static uint64_t tv_to_usecs(struct timeval *tv)
	return tv->tv_sec * 1000000ULL + tv->tv_usec;

// Returns TRUE if b - a >= usecs
static bool hfs_has_elapsed (const struct timeval *a,
                             const struct timeval *b,
	timersub(b, a, &diff);
	return diff.tv_sec * 1000000ULL + diff.tv_usec >= usecs;
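
/*
 * Illustrative example (not part of the original source): with
 * a = { .tv_sec = 100, .tv_usec = 900000 } and
 * b = { .tv_sec = 101, .tv_usec = 50000 }, timersub() yields a difference
 * of 150,000 microseconds, so hfs_has_elapsed(&a, &b, 100000) is true
 * while hfs_has_elapsed(&a, &b, 200000) is false.
 */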
void hfs_syncer(void *arg, __unused wait_result_t wr)
	struct hfsmount *hfsmp = arg;

	KDBG(HFSDBG_SYNCER | DBG_FUNC_START, obfuscate_addr(hfsmp));

	hfs_syncer_lock(hfsmp);

	while (ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)
		   && timerisset(&hfsmp->hfs_sync_req_oldest)) {

		hfs_syncer_wait(hfsmp, &HFS_META_DELAY_TS);

		if (!ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)
			|| !timerisset(&hfsmp->hfs_sync_req_oldest)) {

		/* Check to see whether we should flush now: either the oldest
		   is > HFS_MAX_META_DELAY or HFS_META_DELAY has elapsed since
		   the request and there are no pending writes. */

		uint64_t idle_time = vfs_idle_time(hfsmp->hfs_mp);

		if (!hfs_has_elapsed(&hfsmp->hfs_sync_req_oldest, &now,
			&& idle_time < HFS_META_DELAY) {

		timerclear(&hfsmp->hfs_sync_req_oldest);

		hfs_syncer_unlock(hfsmp);

		KDBG(HFSDBG_SYNCER_TIMED | DBG_FUNC_START, obfuscate_addr(hfsmp));

		/*
		 * We intentionally do a synchronous flush (of the journal or entire volume) here.
		 * For journaled volumes, this means we wait until the metadata blocks are written
		 * to both the journal and their final locations (in the B-trees, etc.).
		 *
		 * This tends to avoid interleaving the metadata writes with other writes (for
		 * example, user data, or to the journal when a later transaction notices that
		 * an earlier transaction has finished its async writes, and then updates the
		 * journal start in the journal header).  Avoiding interleaving of writes is
		 * very good for performance on simple flash devices like SD cards, thumb drives;
		 * and on devices like floppies.  Since removable devices tend to be this kind of
		 * simple device, doing a synchronous flush actually improves performance in
		 *
		 * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
		 * user data to be written.
		 */
		hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_META);
		hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_current());

		KDBG(HFSDBG_SYNCER_TIMED | DBG_FUNC_END);

		hfs_syncer_lock(hfsmp);

	hfsmp->hfs_syncer_thread = NULL;
	hfs_syncer_unlock(hfsmp);
	hfs_syncer_wakeup(hfsmp);

	/* BE CAREFUL WHAT YOU ADD HERE: at this point hfs_unmount is free
	   to continue and therefore hfsmp might be invalid. */

	KDBG(HFSDBG_SYNCER | DBG_FUNC_END);
/*
 * Call into the allocator code and perform a full scan of the bitmap file.
 *
 * This allows us to TRIM unallocated ranges if needed, and also to build up
 * an in-memory summary table of the state of the allocated blocks.
 */
void hfs_scan_blocks (struct hfsmount *hfsmp) {
	/*
	 * Take the allocation file lock.  Journal transactions will block until
	 */
	int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

	/*
	 * We serialize here with the HFS mount lock as we're mounting.
	 *
	 * The mount can only proceed once this thread has acquired the bitmap
	 * lock, since we absolutely do not want someone else racing in and
	 * getting the bitmap lock, doing a read/write of the bitmap file,
	 * then us getting the bitmap lock.
	 *
	 * To prevent this, the mount thread takes the HFS mount mutex, starts us
	 * up, then immediately msleeps on the scan_var variable in the mount
	 * point as a condition variable.  This serialization is safe since
	 * if we race in and try to proceed while they're still holding the lock,
	 * we'll block trying to acquire the global lock.  Since the mount thread
	 * acquires the HFS mutex before starting this function in a new thread,
	 * any lock acquisition on our part must be linearizably AFTER the mount thread's.
	 *
	 * Note that the HFS mount mutex is always taken last, and always for only
	 * a short time.  In this case, we just take it long enough to mark the
	 * scan-in-flight bit.
	 */
	(void) hfs_lock_mount (hfsmp);
	hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_INFLIGHT;
	wakeup((caddr_t) &hfsmp->scan_var);
	hfs_unlock_mount (hfsmp);

	/* Initialize the summary table */
	if (hfs_init_summary (hfsmp)) {
		printf("hfs: could not initialize summary table for %s\n", hfsmp->vcbVN);

	/*
	 * ScanUnmapBlocks assumes that the bitmap lock is held when you
	 * call the function. We don't care if there were any errors issuing unmaps.
	 *
	 * It will also attempt to build up the summary table for subsequent
	 * allocator use, as configured.
	 */
	(void) ScanUnmapBlocks(hfsmp);

	(void) hfs_lock_mount (hfsmp);
	hfsmp->scan_var &= ~HFS_ALLOCATOR_SCAN_INFLIGHT;
	hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_COMPLETED;
	wakeup((caddr_t) &hfsmp->scan_var);
	hfs_unlock_mount (hfsmp);

	buf_invalidateblks(hfsmp->hfs_allocation_vp, 0, 0, 0);

	hfs_systemfile_unlock(hfsmp, flags);
/*
 * Common code for mount and mountroot
 */
hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
            int journal_replay_only, vfs_context_t context)
	struct proc *p = vfs_context_proc(context);
	int retval = E_NONE;
	struct hfsmount *hfsmp = NULL;
	HFSMasterDirectoryBlock *mdbp = NULL;
	daddr64_t log_blkcnt;
	u_int32_t log_blksize;
	u_int32_t phys_blksize;
	u_int32_t minblksize;
	u_int32_t iswritable;
	daddr64_t mdb_offset;
	int isroot = !journal_replay_only && args == NULL;
	u_int32_t device_features = 0;

	ronly = mp && vfs_isrdonly(mp);
	dev = vnode_specrdev(devvp);
	cred = p ? vfs_context_ucred(context) : NOCRED;

	minblksize = kHFSBlockSize;

	/* Advisory locking should be handled at the VFS layer */
	vfs_setlocklocal(mp);

	/* Get the logical block size (treated as physical block size everywhere) */
	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
		if (HFS_MOUNT_DEBUG) {
			printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n");

	if (log_blksize == 0 || log_blksize > 1024*1024*1024) {
		printf("hfs: logical block size 0x%x looks bad.  Not mounting.\n", log_blksize);

	/* Get the physical block size. */
	retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
		if ((retval != ENOTSUP) && (retval != ENOTTY)) {
			if (HFS_MOUNT_DEBUG) {
				printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n");

		/* If device does not support this ioctl, assume that physical
		 * block size is same as logical block size
		 */
		phys_blksize = log_blksize;

	if (phys_blksize == 0 || phys_blksize > MAXBSIZE) {
		printf("hfs: physical block size 0x%x looks bad.  Not mounting.\n", phys_blksize);

	if (phys_blksize < log_blksize) {
		/*
		 * In the off chance that the phys_blksize is SMALLER than the logical
		 * then don't let that happen.  Pretend that the PHYSICALBLOCKSIZE
		 * ioctl was not supported.
		 */
		phys_blksize = log_blksize;

	/* Switch to 512 byte sectors (temporarily) */
	if (log_blksize > 512) {
		u_int32_t size512 = 512;

		if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
			if (HFS_MOUNT_DEBUG) {
				printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n");

	/* Get the number of 512 byte physical blocks. */
	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
		/* resetting block size may fail if getting block count did */
		(void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
		if (HFS_MOUNT_DEBUG) {
			printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n");

	/* Compute an accurate disk size (i.e. within 512 bytes) */
	disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;

	/*
	 * On Tiger it is not necessary to switch the device
	 * block size to be 4k if there are more than 31-bits
	 * worth of blocks but to ensure compatibility with
	 * pre-Tiger systems we have to do it.
	 *
	 * If the device size is not a multiple of 4K (8 * 512), then
	 * switching the logical block size isn't going to help because
	 * we will be unable to write the alternate volume header.
	 * In this case, just leave the logical block size unchanged.
	 */
	if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
		minblksize = log_blksize = 4096;
		if (phys_blksize < log_blksize)
			phys_blksize = log_blksize;

	/*
	 * The cluster layer is not currently prepared to deal with a logical
	 * block size larger than the system's page size.  (It can handle
	 * blocks per page, but not multiple pages per block.)  So limit the
	 * logical block size to the page size.
	 */
	if (log_blksize > PAGE_SIZE) {
		log_blksize = PAGE_SIZE;

	/* Now switch to our preferred physical block size. */
	if (log_blksize > 512) {
		if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
			if (HFS_MOUNT_DEBUG) {
				printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n");

		/* Get the count of physical blocks. */
		if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
			if (HFS_MOUNT_DEBUG) {
				printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n");

	/*
	 * minblksize is the minimum physical block size
	 * log_blksize has our preferred physical block size
	 * log_blkcnt has the total number of physical blocks
	 */
	mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize);

	if ((retval = (int)buf_meta_bread(devvp,
				HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)),
				phys_blksize, cred, &bp))) {
		if (HFS_MOUNT_DEBUG) {
			printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval);

	mdbp = hfs_malloc(kMDBSize);
	bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize);

	hfsmp = hfs_mallocz(sizeof(struct hfsmount));

	hfs_chashinit_finish(hfsmp);

	/* Init the ID lookup hashtable */
	hfs_idhash_init (hfsmp);

	/*
	 * See if the disk supports unmap (trim).
	 *
	 * NOTE: vfs_init_io_attributes has not been called yet, so we can't use the io_flags field
	 * returned by vfs_ioattr.  We need to call VNOP_IOCTL ourselves.
	 */
	if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&device_features, 0, context) == 0) {
		if (device_features & DK_FEATURE_UNMAP) {
			hfsmp->hfs_flags |= HFS_UNMAP;

		if(device_features & DK_FEATURE_BARRIER)
			hfsmp->hfs_flags |= HFS_FEATURE_BARRIER;

	/*
	 * See if the disk is a solid state device, too.  We need this to decide what to do about
	 */
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, context) == 0) {
			hfsmp->hfs_flags |= HFS_SSD;

	/* See if the underlying device is Core Storage or not */
	dk_corestorage_info_t cs_info;
	memset(&cs_info, 0, sizeof(dk_corestorage_info_t));
	if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, context) == 0) {
		hfsmp->hfs_flags |= HFS_CS;
		if (isroot && (cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
			hfsmp->hfs_flags |= HFS_CS_METADATA_PIN;
		if (isroot && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES)) {
			hfsmp->hfs_flags |= HFS_CS_HOTFILE_PIN;
			hfsmp->hfs_cs_hotfile_size = cs_info.hotfile_size;
		if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_SWAPFILE)) {
			hfsmp->hfs_flags |= HFS_CS_SWAPFILE_PIN;

			struct vfsioattr ioattr;
			vfs_ioattr(mp, &ioattr);
			ioattr.io_flags |= VFS_IOATTR_FLAGS_SWAPPIN_SUPPORTED;
			ioattr.io_max_swappin_available = cs_info.swapfile_pinning;
			vfs_setioattr(mp, &ioattr);

	/*
	 * Init the volume information structure
	 */
	lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr);
	lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr);
	lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr);
	lck_spin_init(&hfsmp->vcbFreeExtLock, hfs_spinlock_group, hfs_lock_attr);
	lck_spin_init(&hfsmp->hfs_xattr_io.lock, hfs_spinlock_group, hfs_lock_attr);

	vfs_setfsprivate(mp, hfsmp);
	hfsmp->hfs_mp = mp;			/* Make VFSTOHFS work */
	hfsmp->hfs_raw_dev = vnode_specrdev(devvp);
	hfsmp->hfs_devvp = devvp;
	vnode_ref(devvp);  /* Hold a ref on the device, dropped when hfsmp is freed. */
	hfsmp->hfs_logical_block_size = log_blksize;
	hfsmp->hfs_logical_block_count = log_blkcnt;
	hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
	hfsmp->hfs_physical_block_size = phys_blksize;
	hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize);
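
	/*
	 * Illustrative note (not part of the original source): hfs_log_per_phys
	 * is simply the number of logical blocks per physical block.  For
	 * example, a device reporting 512-byte logical sectors on 4096-byte
	 * physical sectors gives 4096 / 512 = 8; when both sizes match it is 1.
	 */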
1402 hfsmp
->hfs_flags
|= HFS_WRITEABLE_MEDIA
;
1404 hfsmp
->hfs_flags
|= HFS_READ_ONLY
;
1405 if (mp
&& ((unsigned int)vfs_flags(mp
)) & MNT_UNKNOWNPERMISSIONS
)
1406 hfsmp
->hfs_flags
|= HFS_UNKNOWN_PERMS
;
1409 for (i
= 0; i
< MAXQUOTAS
; i
++)
1410 dqfileinit(&hfsmp
->hfs_qfiles
[i
]);
1414 hfsmp
->hfs_uid
= (args
->hfs_uid
== (uid_t
)VNOVAL
) ? UNKNOWNUID
: args
->hfs_uid
;
1415 if (hfsmp
->hfs_uid
== 0xfffffffd) hfsmp
->hfs_uid
= UNKNOWNUID
;
1416 hfsmp
->hfs_gid
= (args
->hfs_gid
== (gid_t
)VNOVAL
) ? UNKNOWNGID
: args
->hfs_gid
;
1417 if (hfsmp
->hfs_gid
== 0xfffffffd) hfsmp
->hfs_gid
= UNKNOWNGID
;
1418 vfs_setowner(mp
, hfsmp
->hfs_uid
, hfsmp
->hfs_gid
); /* tell the VFS */
1419 if (args
->hfs_mask
!= (mode_t
)VNOVAL
) {
1420 hfsmp
->hfs_dir_mask
= args
->hfs_mask
& ALLPERMS
;
1421 if (args
->flags
& HFSFSMNT_NOXONFILES
) {
1422 hfsmp
->hfs_file_mask
= (args
->hfs_mask
& DEFFILEMODE
);
1424 hfsmp
->hfs_file_mask
= args
->hfs_mask
& ALLPERMS
;
1427 hfsmp
->hfs_dir_mask
= UNKNOWNPERMISSIONS
& ALLPERMS
; /* 0777: rwx---rwx */
1428 hfsmp
->hfs_file_mask
= UNKNOWNPERMISSIONS
& DEFFILEMODE
; /* 0666: no --x by default? */
1430 if ((args
->flags
!= (int)VNOVAL
) && (args
->flags
& HFSFSMNT_WRAPPER
))
1433 /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1434 if (mp
&& ((unsigned int)vfs_flags(mp
)) & MNT_UNKNOWNPERMISSIONS
) {
1435 hfsmp
->hfs_uid
= UNKNOWNUID
;
1436 hfsmp
->hfs_gid
= UNKNOWNGID
;
1437 vfs_setowner(mp
, hfsmp
->hfs_uid
, hfsmp
->hfs_gid
); /* tell the VFS */
1438 hfsmp
->hfs_dir_mask
= UNKNOWNPERMISSIONS
& ALLPERMS
; /* 0777: rwx---rwx */
1439 hfsmp
->hfs_file_mask
= UNKNOWNPERMISSIONS
& DEFFILEMODE
; /* 0666: no --x by default? */
1443 /* Find out if disk media is writable. */
1444 if (VNOP_IOCTL(devvp
, DKIOCISWRITABLE
, (caddr_t
)&iswritable
, 0, context
) == 0) {
1446 hfsmp
->hfs_flags
|= HFS_WRITEABLE_MEDIA
;
1448 hfsmp
->hfs_flags
&= ~HFS_WRITEABLE_MEDIA
;
1452 rl_init(&hfsmp
->hfs_reserved_ranges
[0]);
1453 rl_init(&hfsmp
->hfs_reserved_ranges
[1]);
1455 // record the current time at which we're mounting this volume
1458 hfsmp
->hfs_mount_time
= tv
.tv_sec
;
1460 /* Mount a standard HFS disk */
1461 if ((SWAP_BE16(mdbp
->drSigWord
) == kHFSSigWord
) &&
1462 (mntwrapper
|| (SWAP_BE16(mdbp
->drEmbedSigWord
) != kHFSPlusSigWord
))) {
1464 /* If only journal replay is requested, exit immediately */
1465 if (journal_replay_only
) {
1470 /* On 10.6 and beyond, non read-only mounts for HFS standard vols get rejected */
1471 if (vfs_isrdwr(mp
)) {
1476 printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n");
1478 /* Treat it as if it's read-only and not writeable */
1479 hfsmp
->hfs_flags
|= HFS_READ_ONLY
;
1480 hfsmp
->hfs_flags
&= ~HFS_WRITEABLE_MEDIA
;
1482 if ((vfs_flags(mp
) & MNT_ROOTFS
)) {
1483 retval
= EINVAL
; /* Cannot root from HFS standard disks */
1486 /* HFS disks can only use 512 byte physical blocks */
1487 if (log_blksize
> kHFSBlockSize
) {
1488 log_blksize
= kHFSBlockSize
;
1489 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
, (caddr_t
)&log_blksize
, FWRITE
, context
)) {
1493 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
, (caddr_t
)&log_blkcnt
, 0, context
)) {
1497 hfsmp
->hfs_logical_block_size
= log_blksize
;
1498 hfsmp
->hfs_logical_block_count
= log_blkcnt
;
1499 hfsmp
->hfs_logical_bytes
= (uint64_t) log_blksize
* (uint64_t) log_blkcnt
;
1500 hfsmp
->hfs_physical_block_size
= log_blksize
;
1501 hfsmp
->hfs_log_per_phys
= 1;
1504 hfsmp
->hfs_encoding
= args
->hfs_encoding
;
1505 HFSTOVCB(hfsmp
)->volumeNameEncodingHint
= args
->hfs_encoding
;
1507 /* establish the timezone */
1508 gTimeZone
= args
->hfs_timezone
;
1511 retval
= hfs_getconverter(hfsmp
->hfs_encoding
, &hfsmp
->hfs_get_unicode
,
1512 &hfsmp
->hfs_get_hfsname
);
1516 retval
= hfs_MountHFSVolume(hfsmp
, mdbp
, p
);
1518 (void) hfs_relconverter(hfsmp
->hfs_encoding
);
1520 /* On platforms where HFS Standard is not supported, deny the mount altogether */
1526 else { /* Mount an HFS Plus disk */
1527 HFSPlusVolumeHeader
*vhp
;
1528 off_t embeddedOffset
;
1529 int jnl_disable
= 0;
1531 /* Get the embedded Volume Header */
1532 if (SWAP_BE16(mdbp
->drEmbedSigWord
) == kHFSPlusSigWord
) {
1533 embeddedOffset
= SWAP_BE16(mdbp
->drAlBlSt
) * kHFSBlockSize
;
1534 embeddedOffset
+= (u_int64_t
)SWAP_BE16(mdbp
->drEmbedExtent
.startBlock
) *
1535 (u_int64_t
)SWAP_BE32(mdbp
->drAlBlkSiz
);
1538 * Cooperative Fusion is not allowed on embedded HFS+
1539 * filesystems (HFS+ inside HFS standard wrapper)
1541 hfsmp
->hfs_flags
&= ~HFS_CS_METADATA_PIN
;
1544 * If the embedded volume doesn't start on a block
1545 * boundary, then switch the device to a 512-byte
1546 * block size so everything will line up on a block
1549 if ((embeddedOffset
% log_blksize
) != 0) {
1550 printf("hfs_mountfs: embedded volume offset not"
1551 " a multiple of physical block size (%d);"
1552 " switching to 512\n", log_blksize
);
1554 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
,
1555 (caddr_t
)&log_blksize
, FWRITE
, context
)) {
1557 if (HFS_MOUNT_DEBUG
) {
1558 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n");
1563 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
,
1564 (caddr_t
)&log_blkcnt
, 0, context
)) {
1565 if (HFS_MOUNT_DEBUG
) {
1566 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n");
1571 /* Note: relative block count adjustment */
1572 hfsmp
->hfs_logical_block_count
*=
1573 hfsmp
->hfs_logical_block_size
/ log_blksize
;
1575 /* Update logical /physical block size */
1576 hfsmp
->hfs_logical_block_size
= log_blksize
;
1577 hfsmp
->hfs_physical_block_size
= log_blksize
;
1579 phys_blksize
= log_blksize
;
1580 hfsmp
->hfs_log_per_phys
= 1;
1583 disksize
= (u_int64_t
)SWAP_BE16(mdbp
->drEmbedExtent
.blockCount
) *
1584 (u_int64_t
)SWAP_BE32(mdbp
->drAlBlkSiz
);
1586 hfsmp
->hfs_logical_block_count
= disksize
/ log_blksize
;
1588 hfsmp
->hfs_logical_bytes
= (uint64_t) hfsmp
->hfs_logical_block_count
* (uint64_t) hfsmp
->hfs_logical_block_size
;
1590 mdb_offset
= (daddr64_t
)((embeddedOffset
/ log_blksize
) + HFS_PRI_SECTOR(log_blksize
));
1593 buf_markinvalid(bp
);
1597 retval
= (int)buf_meta_bread(devvp
, HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
),
1598 phys_blksize
, cred
, &bp
);
1600 if (HFS_MOUNT_DEBUG
) {
1601 printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval
);
1605 bcopy((char *)buf_dataptr(bp
) + HFS_PRI_OFFSET(phys_blksize
), mdbp
, 512);
1608 vhp
= (HFSPlusVolumeHeader
*) mdbp
;
1611 else { /* pure HFS+ */
1613 vhp
= (HFSPlusVolumeHeader
*) mdbp
;
1616 retval
= hfs_ValidateHFSPlusVolumeHeader(hfsmp
, vhp
);
1621 * If allocation block size is less than the physical block size,
1622 * invalidate the buffer read in using native physical block size
1623 * to ensure data consistency.
1625 * HFS Plus reserves one allocation block for the Volume Header.
1626 * If the physical size is larger, then when we read the volume header,
1627 * we will also end up reading in the next allocation block(s).
1628 * If those other allocation block(s) is/are modified, and then the volume
1629 * header is modified, the write of the volume header's buffer will write
1630 * out the old contents of the other allocation blocks.
1632 * We assume that the physical block size is same as logical block size.
1633 * The physical block size value is used to round down the offsets for
1634 * reading and writing the primary and alternate volume headers.
1636 * The same logic is also in hfs_MountHFSPlusVolume to ensure that
1637 * hfs_mountfs, hfs_MountHFSPlusVolume and later are doing the I/Os
1638 * using same block size.
1640 if (SWAP_BE32(vhp
->blockSize
) < hfsmp
->hfs_physical_block_size
) {
1641 phys_blksize
= hfsmp
->hfs_logical_block_size
;
1642 hfsmp
->hfs_physical_block_size
= hfsmp
->hfs_logical_block_size
;
1643 hfsmp
->hfs_log_per_phys
= 1;
1644 // There should be one bp associated with devvp in buffer cache.
1645 retval
= buf_invalidateblks(devvp
, 0, 0, 0);
1650 if (isroot
&& ((SWAP_BE32(vhp
->attributes
) & kHFSVolumeUnmountedMask
) != 0)) {
1651 vfs_set_root_unmounted_cleanly();
1655 * On inconsistent disks, do not allow read-write mount
1656 * unless it is the boot volume being mounted. We also
1657 * always want to replay the journal if the journal_replay_only
1658 * flag is set because that will (most likely) get the
1659 * disk into a consistent state before fsck_hfs starts
1662 if (!journal_replay_only
1663 && !(vfs_flags(mp
) & MNT_ROOTFS
)
1664 && (SWAP_BE32(vhp
->attributes
) & kHFSVolumeInconsistentMask
)
1665 && !(hfsmp
->hfs_flags
& HFS_READ_ONLY
)) {
1667 if (HFS_MOUNT_DEBUG
) {
1668 printf("hfs_mountfs: failed to mount non-root inconsistent disk\n");
1679 if (args
!= NULL
&& (args
->flags
& HFSFSMNT_EXTENDED_ARGS
) &&
1680 args
->journal_disable
) {
1685 // We only initialize the journal here if the last person
1686 // to mount this volume was journaling aware. Otherwise
1687 // we delay journal initialization until later at the end
1688 // of hfs_MountHFSPlusVolume() because the last person who
1689 // mounted it could have messed things up behind our back
1690 // (so we need to go find the .journal file, make sure it's
1691 // the right size, re-sync up if it was moved, etc).
1693 if ( (SWAP_BE32(vhp
->lastMountedVersion
) == kHFSJMountVersion
)
1694 && (SWAP_BE32(vhp
->attributes
) & kHFSVolumeJournaledMask
)
1697 // if we're able to init the journal, mark the mount
1698 // point as journaled.
1700 if ((retval
= hfs_early_journal_init(hfsmp
, vhp
, args
, embeddedOffset
, mdb_offset
, mdbp
, cred
)) == 0) {
1702 vfs_setflags(mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
1704 if (retval
== EROFS
) {
1705 // EROFS is a special error code that means the volume has an external
1706 // journal which we couldn't find. in that case we do not want to
1707 // rewrite the volume header - we'll just refuse to mount the volume.
1708 if (HFS_MOUNT_DEBUG
) {
1709 printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n");
1715 // if the journal failed to open, then set the lastMountedVersion
1716 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1717 // of just bailing out because the volume is journaled.
1719 if (HFS_MOUNT_DEBUG
) {
1720 printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n");
1723 HFSPlusVolumeHeader
*jvhp
;
1725 hfsmp
->hfs_flags
|= HFS_NEED_JNL_RESET
;
1727 if (mdb_offset
== 0) {
1728 mdb_offset
= (daddr64_t
)((embeddedOffset
/ log_blksize
) + HFS_PRI_SECTOR(log_blksize
));
1732 retval
= (int)buf_meta_bread(devvp
,
1733 HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
),
1734 phys_blksize
, cred
, &bp
);
1736 jvhp
= (HFSPlusVolumeHeader
*)(buf_dataptr(bp
) + HFS_PRI_OFFSET(phys_blksize
));
1738 if (SWAP_BE16(jvhp
->signature
) == kHFSPlusSigWord
|| SWAP_BE16(jvhp
->signature
) == kHFSXSigWord
) {
1739 printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n");
1740 jvhp
->lastMountedVersion
= SWAP_BE32(kFSKMountVersion
);
1748 // clear this so the error exit path won't try to use it
1753 // if this isn't the root device just bail out.
1754 // If it is the root device we just continue on
1755 // in the hopes that fsck_hfs will be able to
1756 // fix any damage that exists on the volume.
1757 if (mp
&& !(vfs_flags(mp
) & MNT_ROOTFS
)) {
1758 if (HFS_MOUNT_DEBUG
) {
1759 printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n");
1767 /* Either the journal is replayed successfully, or there
1768 * was nothing to replay, or no journal exists. In any case,
1771 if (journal_replay_only
) {
1777 (void) hfs_getconverter(0, &hfsmp
->hfs_get_unicode
, &hfsmp
->hfs_get_hfsname
);
1780 retval
= hfs_MountHFSPlusVolume(hfsmp
, vhp
, embeddedOffset
, disksize
, p
, args
, cred
);
1782 * If the backend didn't like our physical blocksize
1783 * then retry with physical blocksize of 512.
1785 if ((retval
== ENXIO
) && (log_blksize
> 512) && (log_blksize
!= minblksize
)) {
1786 printf("hfs_mountfs: could not use physical block size "
1787 "(%d) switching to 512\n", log_blksize
);
1789 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
, (caddr_t
)&log_blksize
, FWRITE
, context
)) {
1790 if (HFS_MOUNT_DEBUG
) {
1791 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n");
1796 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
, (caddr_t
)&log_blkcnt
, 0, context
)) {
1797 if (HFS_MOUNT_DEBUG
) {
1798 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n");
1803 set_fsblocksize(devvp
);
1804 /* Note: relative block count adjustment (in case this is an embedded volume). */
1805 hfsmp
->hfs_logical_block_count
*= hfsmp
->hfs_logical_block_size
/ log_blksize
;
1806 hfsmp
->hfs_logical_block_size
= log_blksize
;
1807 hfsmp
->hfs_log_per_phys
= hfsmp
->hfs_physical_block_size
/ log_blksize
;
1809 hfsmp
->hfs_logical_bytes
= (uint64_t) hfsmp
->hfs_logical_block_count
* (uint64_t) hfsmp
->hfs_logical_block_size
;
		if (hfsmp->jnl && hfsmp->jvp == devvp) {
			// close and re-open this with the new block size
			journal_close(hfsmp->jnl);

			if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
				vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));

				// if the journal failed to open, then set the lastMountedVersion
				// to be "FSK!" which fsck_hfs will see and force the fsck instead
				// of just bailing out because the volume is journaled.
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n");

				HFSPlusVolumeHeader *jvhp;

				hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;

				if (mdb_offset == 0) {
					mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));

				retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
				    phys_blksize, cred, &bp);
				jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
				if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
					printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n");
					jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);

				// clear this so the error exit path won't try to use it

				// if this isn't the root device just bail out.
				// If it is the root device we just continue on
				// in the hopes that fsck_hfs will be able to
				// fix any damage that exists on the volume.
				if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
					if (HFS_MOUNT_DEBUG) {
						printf("hfs_mountfs: hfs_early_journal_init (2) failed \n");

		/* Try again with a smaller block size... */
		retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
		if (retval && HFS_MOUNT_DEBUG) {
			printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval);
	(void) hfs_relconverter(0);

	// save off a snapshot of the mtime from the previous mount
	hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime;

		if (HFS_MOUNT_DEBUG) {
			printf("hfs_mountfs: encountered failure %d \n", retval);

	struct vfsstatfs *vsfs = vfs_statfs(mp);
	vsfs->f_fsid.val[0] = dev;
	vsfs->f_fsid.val[1] = vfs_typenum(mp);

	vfs_setmaxsymlen(mp, 0);

	if (ISSET(hfsmp->hfs_flags, HFS_STANDARD)) {
		/* HFS standard doesn't support extended readdir! */
		mount_set_noreaddirext (mp);

	/*
	 * Set the free space warning levels for a non-root volume:
	 *
	 * Set the "danger" limit to 1% of the volume size or 150MB, whichever is less.
	 * Set the "warning" limit to 2% of the volume size or 500MB, whichever is less.
	 * Set the "near warning" limit to 10% of the volume size or 1GB, whichever is less.
	 * And last, set the "desired" freespace level to 12% of the volume size or 1.2GB,
	 * whichever is less.
	 */
	hfsmp->hfs_freespace_notify_dangerlimit =
	    MIN(HFS_VERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_VERYLOWDISKTRIGGERFRACTION);
	hfsmp->hfs_freespace_notify_warninglimit =
	    MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION);
	hfsmp->hfs_freespace_notify_nearwarninglimit =
	    MIN(HFS_NEARLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_NEARLOWDISKTRIGGERFRACTION);
	hfsmp->hfs_freespace_notify_desiredlevel =
	    MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION);
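	/*
	 * Each limit above is expressed in allocation blocks: a fixed byte
	 * threshold converted via blockSize, capped at a percentage of
	 * totalBlocks (the MIN of the two, per the comment above).
	 */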
	/*
	 * Set the free space warning levels for the root volume:
	 *
	 * Set the "danger" limit to 5% of the volume size or 512MB, whichever is less.
	 * Set the "warning" limit to 10% of the volume size or 1GB, whichever is less.
	 * Set the "near warning" limit to 10.5% of the volume size or 1.1GB, whichever is less.
	 * And last, set the "desired" freespace level to 11% of the volume size or 1.25GB,
	 * whichever is less.
	 *
	 * NOTE: While those are the default limits, KernelEventAgent (as of 3/2016)
	 * will unilaterally override these to the following on OSX only:
	 *
	 * Warning: Min (2% of root volume, 10GB), with a floor of 10GB
	 * Desired: Warning Threshold + 1.5GB
	 */
	hfsmp->hfs_freespace_notify_dangerlimit =
	    MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION);
	hfsmp->hfs_freespace_notify_warninglimit =
	    MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION);
	hfsmp->hfs_freespace_notify_nearwarninglimit =
	    MIN(HFS_ROOTNEARLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTNEARLOWDISKTRIGGERFRACTION);
	hfsmp->hfs_freespace_notify_desiredlevel =
	    MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
	    (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION);

	/* Check if the file system exists on virtual device, like disk image */
	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) {
			hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE;

	    && !ISSET(hfsmp->hfs_flags, HFS_VIRTUAL_DEVICE)
	    && hfs_is_ejectable(vfs_statfs(mp)->f_mntfromname)) {
		SET(hfsmp->hfs_flags, HFS_RUN_SYNCER);

	const char *dev_name = (hfsmp->hfs_devvp
	    ? vnode_getname_printable(hfsmp->hfs_devvp) : NULL);

	printf("hfs: mounted %s on device %s\n",
	    (hfsmp->vcbVN[0] ? (const char*) hfsmp->vcbVN : "unknown"),
	    dev_name ?: "unknown device");

	vnode_putname_printable(dev_name);

	/*
	 * Start looking for free space to drop below this level and generate a
	 * warning immediately if needed:
	 */
	hfsmp->hfs_notification_conditions = 0;
	hfs_generate_volume_notifications(hfsmp);

	(void) hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);

	hfs_free(mdbp, kMDBSize);

	hfs_free(mdbp, kMDBSize);

	hfs_close_jvp(hfsmp);

	if (hfsmp->hfs_devvp) {
		vnode_rele(hfsmp->hfs_devvp);

	hfs_locks_destroy(hfsmp);
	hfs_delete_chash(hfsmp);
	hfs_idhash_destroy (hfsmp);

	hfs_free(hfsmp, sizeof(*hfsmp));

	vfs_setfsprivate(mp, NULL);
/*
 * Make a filesystem operational.
 * Nothing to do at the moment.
 */
hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context)

/*
 * unmount system call
 */
hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp = VFSTOHFS(mp);
	int retval = E_NONE;

	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;

	const char *dev_name = (hfsmp->hfs_devvp
	    ? vnode_getname_printable(hfsmp->hfs_devvp) : NULL);

	printf("hfs: unmount initiated on %s on device %s\n",
	    (hfsmp->vcbVN[0] ? (const char*) hfsmp->vcbVN : "unknown"),
	    dev_name ?: "unknown device");

	vnode_putname_printable(dev_name);

	if ((retval = hfs_flushfiles(mp, flags, p)) && !force)

	if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
		(void) hfs_recording_suspend(hfsmp);

	hfs_syncer_free(hfsmp);

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		if (hfsmp->hfs_summary_table) {
			/*
			 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
			 */
			if (hfsmp->hfs_allocation_vp) {
				err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

			hfs_free(hfsmp->hfs_summary_table, hfsmp->hfs_summary_bytes);
			hfsmp->hfs_summary_table = NULL;
			hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;

			if (err == 0 && hfsmp->hfs_allocation_vp){
				hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));

	/*
	 * Flush out the b-trees, volume bitmap and Volume Header
	 */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
		retval = hfs_start_transaction(hfsmp);
		} else if (!force) {

		if (hfsmp->hfs_startup_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_startup_vp));
			if (retval && !force)

		if (hfsmp->hfs_attribute_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_attribute_vp));
			if (retval && !force)

		(void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
		if (retval && !force)

		(void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
		if (retval && !force)

		if (hfsmp->hfs_allocation_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
			if (retval && !force)

		if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) {
			retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p);
			if (retval && !force)

		/* If runtime corruption was detected, indicate that the volume
		 * was not unmounted cleanly.
		 */
		if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;

			HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask;

		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
			u_int32_t min_start = hfsmp->totalBlocks;

			// set the nextAllocation pointer to the smallest free block number
			// we've seen so on the next mount we won't rescan unnecessarily
			lck_spin_lock(&hfsmp->vcbFreeExtLock);
			for(i=0; i < (int)hfsmp->vcbFreeExtCnt; i++) {
				if (hfsmp->vcbFreeExt[i].startBlock < min_start) {
					min_start = hfsmp->vcbFreeExt[i].startBlock;

			lck_spin_unlock(&hfsmp->vcbFreeExtLock);
			if (min_start < hfsmp->nextAllocation) {
				hfsmp->nextAllocation = min_start;

		retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
			goto err_exit;	/* could not flush everything */

		hfs_end_transaction(hfsmp);

		hfs_flush(hfsmp, HFS_FLUSH_FULL);

	/*
	 * Invalidate our caches and release metadata vnodes
	 */
	(void) hfsUnmount(hfsmp, p);

	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
		(void) hfs_relconverter(hfsmp->hfs_encoding);

		journal_close(hfsmp->jnl);

	VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);

	hfs_close_jvp(hfsmp);

	/*
	 * Last chance to dump unreferenced system files.
	 */
	(void) vflush(mp, NULLVP, FORCECLOSE);

	/* Drop our reference on the backing fs (if any). */
	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingvp) {
		struct vnode * tmpvp;

		hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
		tmpvp = hfsmp->hfs_backingvp;
		hfsmp->hfs_backingvp = NULLVP;
#endif /* HFS_SPARSE_DEV */

	vnode_rele(hfsmp->hfs_devvp);

	hfs_locks_destroy(hfsmp);
	hfs_delete_chash(hfsmp);
	hfs_idhash_destroy(hfsmp);

	hfs_assert(TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS])
	    && TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_LOCKED_BLOCKS]));
	hfs_assert(!hfsmp->lockedBlocks);

	hfs_free(hfsmp, sizeof(*hfsmp));

	// decrement kext retain count
	OSDecrementAtomic(&hfs_active_mounts);
	OSKextReleaseKextWithLoadTag(OSKextGetCurrentLoadTag());

#if HFS_LEAK_DEBUG && TARGET_OS_OSX
	if (hfs_active_mounts == 0) {
		if (hfs_dump_allocations())

		printf("hfs: last unmount and nothing was leaked!\n");
		msleep(hfs_unmount, NULL, PINOD, "hfs_unmount",
		    &(struct timespec){ 5, 0 });

	hfs_end_transaction(hfsmp);
/*
 * Return the root of a filesystem.
 */
int hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context)
{
	return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0);
}
/*
 * Do operations associated with quotas
 */
hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context)

hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context)
	struct proc *p = vfs_context_proc(context);
	int cmd, type, error;

	uid = kauth_cred_getuid(vfs_context_ucred(context));
	cmd = cmds >> SUBCMDSHIFT;

		if (uid == kauth_cred_getuid(vfs_context_ucred(context)))

		if ( (error = vfs_context_suser(context)) )

	type = cmds & SUBCMDMASK;
	if ((u_int)type >= MAXQUOTAS)

	if ((error = vfs_busy(mp, LK_NOWAIT)) != 0)

		error = hfs_quotaon(p, mp, type, datap);

		error = hfs_quotaoff(p, mp, type);

		error = hfs_setquota(mp, uid, type, datap);

		error = hfs_setuse(mp, uid, type, datap);

		error = hfs_getquota(mp, uid, type, datap);

		error = hfs_qsync(mp);

		error = hfs_quotastat(mp, type, datap);
/* Subtype is composite of bits */
#define HFS_SUBTYPE_JOURNALED		0x01
#define HFS_SUBTYPE_CASESENSITIVE	0x02
/* bits 2 - 6 reserved */
#define HFS_SUBTYPE_STANDARDHFS		0x80

/*
 * Get file system statistics.
 */
hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context)
	ExtendedVCB *vcb = VFSTOVCB(mp);
	struct hfsmount *hfsmp = VFSTOHFS(mp);
	u_int16_t subtype = 0;

	sbp->f_bsize = (u_int32_t)vcb->blockSize;
	sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
	sbp->f_blocks = (u_int64_t)((u_int32_t)vcb->totalBlocks);
	sbp->f_bfree = (u_int64_t)((u_int32_t)hfs_freeblks(hfsmp, 0));
	sbp->f_bavail = (u_int64_t)((u_int32_t)hfs_freeblks(hfsmp, 1));
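	/*
	 * Note: both free-block fields come from hfs_freeblks(); the second
	 * argument presumably selects whether reserved/loaned blocks are
	 * excluded, which is why f_bavail can be smaller than f_bfree.
	 */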
	sbp->f_files = (u_int64_t)HFS_MAX_FILES;
	sbp->f_ffree = (u_int64_t)hfs_free_cnids(hfsmp);

	/*
	 * Subtypes (flavors) for HFS
	 *   0: Mac OS Extended
	 *   1: Mac OS Extended (Journaled)
	 *   2: Mac OS Extended (Case Sensitive)
	 *   3: Mac OS Extended (Case Sensitive, Journaled)
	 * 128: Mac OS Standard
	 */
	if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
		/* HFS+ & variants */
			subtype |= HFS_SUBTYPE_JOURNALED;
		if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
			subtype |= HFS_SUBTYPE_CASESENSITIVE;

		subtype = HFS_SUBTYPE_STANDARDHFS;

	sbp->f_fssubtype = subtype;
// XXXdbg -- this is a callback to be used by the journal to
//           get meta data blocks flushed out to disk.
//
// XXXdbg -- be smarter and don't flush *every* block on each
//           call. try to only flush some so we don't wind up
//           being too synchronous.
//
hfs_sync_metadata(void *arg)
	struct mount *mp = (struct mount *)arg;
	struct hfsmount *hfsmp;
	daddr64_t priIDSector;

	hfsmp = VFSTOHFS(mp);
	vcb = HFSTOVCB(hfsmp);

	// now make sure the super block is flushed
	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
	    HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));

	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
	    HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
	    hfsmp->hfs_physical_block_size, NOCRED, &bp);
	if ((retval != 0 ) && (retval != ENXIO)) {
		printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
		    (int)priIDSector, retval);

	if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {

	/* Note that these I/Os bypass the journal (no calls to journal_start_modify_block) */

	// the alternate super block...
	// XXXdbg - we probably don't need to do this each and every time.
	//          hfs_btreeio.c:FlushAlternate() should flag when it was
	if (hfsmp->hfs_partition_avh_sector) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
		    HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
		    hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
			/*
			 * note this I/O can fail if the partition shrank behind our backs!
			 * So failure should be OK here.
			 */

	/* Is the FS's idea of the AVH different than the partition ? */
	if ((hfsmp->hfs_fs_avh_sector) && (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
		    HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
		    hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
struct hfs_sync_cargs {
	int		atime_only_syncs;
	time_t		sync_start_time;
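/*
 * Per-vnode callback handed to vnode_iterate() by hfs_sync() below: it
 * fsyncs a cnode that is dirty (or has dirty buffers), rate-limits
 * atime-only updates, and always returns VNODE_RETURNED so iteration
 * continues across the whole mount.
 */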
hfs_sync_callback(struct vnode *vp, void *cargs)
	struct cnode *cp = VTOC(vp);
	struct hfs_sync_cargs *args;

	args = (struct hfs_sync_cargs *)cargs;

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
		return (VNODE_RETURNED);

	hfs_dirty_t dirty_state = hfs_is_dirty(cp);

	bool sync = dirty_state == HFS_DIRTY || vnode_hasdirtyblks(vp);

	if (!sync && dirty_state == HFS_DIRTY_ATIME
	    && args->atime_only_syncs < 256) {
		// We only update if the atime changed more than 60s ago
		if (args->sync_start_time - cp->c_attr.ca_atime > 60) {
			++args->atime_only_syncs;

		error = hfs_fsync(vp, args->waitfor, 0, args->p);
			args->error = error;
	} else if (cp->c_touch_acctime)
		hfs_touchtimes(VTOHFS(vp), cp);

	return (VNODE_RETURNED);
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
hfs_sync(struct mount *mp, int waitfor, vfs_context_t context)
	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp;
	struct vnode *meta_vp[4];
	int error, allerror = 0;
	struct hfs_sync_cargs args;

	hfsmp = VFSTOHFS(mp);

	// Back off if hfs_changefs or a freeze is underway
	hfs_lock_mount(hfsmp);
	if ((hfsmp->hfs_flags & HFS_IN_CHANGEFS)
	    || hfsmp->hfs_freeze_state != HFS_THAWED) {
		hfs_unlock_mount(hfsmp);

	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		hfs_unlock_mount(hfsmp);

	++hfsmp->hfs_syncers;
	hfs_unlock_mount(hfsmp);

	args.cred = kauth_cred_get();
	args.waitfor = waitfor;
	args.atime_only_syncs = 0;
	args.sync_start_time = tv.tv_sec;

	/*
	 * hfs_sync_callback will be called for each vnode
	 * hung off of this mount point... the vnode will be
	 * properly referenced and unreferenced around the callback
	 */
	vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args);

		allerror = args.error;

	vcb = HFSTOVCB(hfsmp);

	meta_vp[0] = vcb->extentsRefNum;
	meta_vp[1] = vcb->catalogRefNum;
	meta_vp[2] = vcb->allocationsRefNum;  /* This is NULL for standard HFS */
	meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */

	/* Now sync our three metadata files */
	for (i = 0; i < 4; ++i) {
		if ((btvp==0) || (vnode_mount(btvp) != mp))

		/* XXX use hfs_systemfile_lock instead ? */
		(void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

		if (!hfs_is_dirty(cp) && !vnode_hasdirtyblks(btvp)) {
			hfs_unlock(VTOC(btvp));

		error = vnode_get(btvp);
			hfs_unlock(VTOC(btvp));

		if ((error = hfs_fsync(btvp, waitfor, 0, p)))

	/*
	 * Force stale file system control information to be flushed.
	 */
	if (vcb->vcbSigWord == kHFSSigWord) {
		if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) {

	hfs_hotfilesync(hfsmp, vfs_context_kernel());

	/*
	 * Write back modified superblock.
	 */
	if (IsVCBDirty(vcb)) {
		error = hfs_flushvolumeheader(hfsmp, waitfor == MNT_WAIT ? HFS_FVH_WAIT : 0);

		hfs_flush(hfsmp, HFS_FLUSH_JOURNAL);

	hfs_lock_mount(hfsmp);
	boolean_t wake = (!--hfsmp->hfs_syncers
	    && hfsmp->hfs_freeze_state == HFS_WANT_TO_FREEZE);
	hfs_unlock_mount(hfsmp);
		wakeup(&hfsmp->hfs_freeze_state);
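/*
 * The hfs_syncers counter above pairs with the freeze machinery: the last
 * syncer to finish while the volume is in HFS_WANT_TO_FREEZE wakes the
 * thread sleeping on hfs_freeze_state so the freeze can proceed.
 */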
/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the cnode id is valid
 * - call hfs_vget() to get the locked cnode
 * - check for an unallocated cnode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context)
	struct hfsfid *hfsfhp;

	hfsfhp = (struct hfsfid *)fhp;

	if (fhlen < (int)sizeof(struct hfsfid))

	result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0, 0);
	if (result == ENOENT)

	/*
	 * We used to use the create time as the gen id of the file handle,
	 * but it is not static enough because it can change at any point
	 * via system calls.  We still don't have another volume ID or other
	 * unique identifier to use for a generation ID across reboots that
	 * persists until the file is removed.  Using only the CNID exposes
	 * us to the potential wrap-around case, but as of 2/2008, it would take
	 * over 2 months to wrap around if the machine did nothing but allocate
	 * CNIDs.  Using some kind of wrap counter would only be effective if
	 * each file had the wrap counter associated with it.  For now,
	 * we use only the CNID to identify the file as it's good enough.
	 */
	hfs_unlock(VTOC(nvp));
/*
 * Vnode pointer to File handle
 */
hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context)
	struct hfsfid *hfsfhp;

	if (ISHFS(VTOVCB(vp)))
		return (ENOTSUP);	/* hfs standard is not exportable */

	if (*fhlenp < (int)sizeof(struct hfsfid))

	hfsfhp = (struct hfsfid *)fhp;
	/* only the CNID is used to identify the file now */
	hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
	hfsfhp->hfsfid_gen = htonl(cp->c_fileid);
	*fhlenp = sizeof(struct hfsfid);
/*
 * Initialize HFS filesystems, done only once per boot.
 *
 * HFS is not a kext-based file system.  This makes it difficult to find
 * out when the last HFS file system was unmounted and call hfs_uninit()
 * to deallocate data structures allocated in hfs_init().  Therefore we
 * never deallocate memory allocated by lock attribute and group initializations
 */
hfs_init(__unused struct vfsconf *vfsp)
	static int done = 0;

	hfs_lock_attr    = lck_attr_alloc_init();
	hfs_group_attr   = lck_grp_attr_alloc_init();
	hfs_mutex_group  = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
	hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
	hfs_spinlock_group = lck_grp_alloc_init("hfs-spinlock", hfs_group_attr);

/*
 * Destroy all locks, mutexes and spinlocks in hfsmp on unmount or failed mount
 */
hfs_locks_destroy(struct hfsmount *hfsmp)
	lck_mtx_destroy(&hfsmp->hfs_mutex, hfs_mutex_group);
	lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group);
	lck_rw_destroy(&hfsmp->hfs_global_lock, hfs_rwlock_group);
	lck_spin_destroy(&hfsmp->vcbFreeExtLock, hfs_spinlock_group);
	lck_spin_destroy(&hfsmp->hfs_xattr_io.lock, hfs_spinlock_group);
hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
	struct hfsmount * hfsmp;
	char fstypename[MFSNAMELEN];

	if (!vnode_isvroot(vp))

	vnode_vfsname(vp, fstypename);
	if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0)

	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)

// Replace user-space value
static errno_t ureplace(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen,
    void *data, size_t len)
	if (oldp && *oldlenp < len)
	if (newp && newlen != len)
		error = copyout(data, oldp, len);
	return newp ? copyin(newp, data, len) : 0;

#define UREPLACE(oldp, oldlenp, newp, newlenp, v)		\
	ureplace(oldp, oldlenp, newp, newlenp, &v, sizeof(v))
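/*
 * Typical use in the sysctl handlers below (a sketch of the existing
 * pattern, not an addition to the API): the current kernel value is
 * copied out to the caller's old buffer and, if a new value was
 * supplied, copied back in:
 *
 *	error = UREPLACE(oldp, oldlenp, newp, newlen, bias);
 */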
static hfsmount_t *hfs_mount_from_cwd(vfs_context_t ctx)
	vnode_t vp = vfs_context_cwd(ctx);

	/*
	 * We could use vnode_tag, but it is probably more future proof to
	 * compare fstypename.
	 */
	char fstypename[MFSNAMELEN];
	vnode_vfsname(vp, fstypename);

	if (strcmp(fstypename, "hfs"))
2867 * HFS filesystem related variables.
2870 hfs_sysctl(int *name
, u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
2871 user_addr_t newp
, size_t newlen
, vfs_context_t context
)
2874 struct hfsmount
*hfsmp
;
2875 struct proc
*p
= NULL
;
2877 /* all sysctl names at this level are terminal */
2879 p
= vfs_context_proc(context
);
2880 if (name
[0] == HFS_ENCODINGBIAS
) {
2883 bias
= hfs_getencodingbias();
2885 error
= UREPLACE(oldp
, oldlenp
, newp
, newlen
, bias
);
2889 hfs_setencodingbias(bias
);
2894 if (name
[0] == HFS_EXTEND_FS
) {
2895 u_int64_t newsize
= 0;
2896 vnode_t vp
= vfs_context_cwd(context
);
2898 if (newp
== USER_ADDR_NULL
|| vp
== NULLVP
2899 || newlen
!= sizeof(quad_t
) || !oldlenp
)
2901 if ((error
= hfs_getmountpoint(vp
, &hfsmp
)))
2904 /* Start with the 'size' set to the current number of bytes in the filesystem */
2905 newsize
= ((uint64_t)hfsmp
->totalBlocks
) * ((uint64_t)hfsmp
->blockSize
);
2907 error
= UREPLACE(oldp
, oldlenp
, newp
, newlen
, newsize
);
2911 return hfs_extendfs(hfsmp
, newsize
, context
);
2912 } else if (name
[0] == HFS_ENABLE_JOURNALING
) {
2913 // make the file system journaled...
2916 struct cat_attr jnl_attr
;
2917 struct cat_attr jinfo_attr
;
2918 struct cat_fork jnl_fork
;
2919 struct cat_fork jinfo_fork
;
2923 uint64_t journal_byte_offset
;
2924 uint64_t journal_size
;
2925 vnode_t jib_vp
= NULLVP
;
2926 struct JournalInfoBlock local_jib
;
2931 /* Only root can enable journaling */
2932 if (!kauth_cred_issuser(kauth_cred_get())) {
2937 hfsmp
= hfs_mount_from_cwd(context
);
2941 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
2944 if (HFSTOVCB(hfsmp
)->vcbSigWord
== kHFSSigWord
) {
2945 printf("hfs: can't make a plain hfs volume journaled.\n");
2950 printf("hfs: volume %s is already journaled!\n", hfsmp
->vcbVN
);
2953 vcb
= HFSTOVCB(hfsmp
);
2955 /* Set up local copies of the initialization info */
2956 tmpblkno
= (uint32_t) name
[1];
2957 jib_blkno
= (uint64_t) tmpblkno
;
2958 journal_byte_offset
= (uint64_t) name
[2];
2959 journal_byte_offset
*= hfsmp
->blockSize
;
2960 journal_byte_offset
+= hfsmp
->hfsPlusIOPosOffset
;
2961 journal_size
= (uint64_t)((unsigned)name
[3]);
2963 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_EXTENTS
, HFS_EXCLUSIVE_LOCK
);
2964 if (BTHasContiguousNodes(VTOF(vcb
->catalogRefNum
)) == 0 ||
2965 BTHasContiguousNodes(VTOF(vcb
->extentsRefNum
)) == 0) {
2967 printf("hfs: volume has a btree w/non-contiguous nodes. can not enable journaling.\n");
2968 hfs_systemfile_unlock(hfsmp
, lockflags
);
2971 hfs_systemfile_unlock(hfsmp
, lockflags
);
2973 // make sure these both exist!
2974 if ( GetFileInfo(vcb
, kHFSRootFolderID
, ".journal_info_block", &jinfo_attr
, &jinfo_fork
) == 0
2975 || GetFileInfo(vcb
, kHFSRootFolderID
, ".journal", &jnl_attr
, &jnl_fork
) == 0) {
2981 * At this point, we have a copy of the metadata that lives in the catalog for the
2982 * journal info block. Compare that the journal info block's single extent matches
2983 * that which was passed into this sysctl.
2985 * If it is different, deny the journal enable call.
2987 if (jinfo_fork
.cf_blocks
> 1) {
2988 /* too many blocks */
2992 if (jinfo_fork
.cf_extents
[0].startBlock
!= jib_blkno
) {
2998 * We want to immediately purge the vnode for the JIB.
3000 * Because it was written to from userland, there's probably
3001 * a vnode somewhere in the vnode cache (possibly with UBC backed blocks).
3002 * So we bring the vnode into core, then immediately do whatever
3003 * we can to flush/vclean it out. This is because those blocks will be
3004 * interpreted as user data, which may be treated separately on some platforms
3005 * than metadata. If the vnode is gone, then there cannot be backing blocks
3008 if (hfs_vget (hfsmp
, jinfo_attr
.ca_fileid
, &jib_vp
, 1, 0)) {
3012 * Now we have a vnode for the JIB. recycle it. Because we hold an iocount
3013 * on the vnode, we'll just mark it for termination when the last iocount
3014 * (hopefully ours), is dropped.
3016 vnode_recycle (jib_vp
);
3017 err
= vnode_put (jib_vp
);
3022 /* Initialize the local copy of the JIB (just like hfs.util) */
3023 memset (&local_jib
, 'Z', sizeof(struct JournalInfoBlock
));
3024 local_jib
.flags
= SWAP_BE32(kJIJournalInFSMask
);
3025 /* Note that the JIB's offset is in bytes */
3026 local_jib
.offset
= SWAP_BE64(journal_byte_offset
);
3027 local_jib
.size
= SWAP_BE64(journal_size
);
3030 * Now write out the local JIB. This essentially overwrites the userland
3031 * copy of the JIB. Read it as BLK_META to treat it as a metadata read/write.
3033 jib_buf
= buf_getblk (hfsmp
->hfs_devvp
,
3034 jib_blkno
* (hfsmp
->blockSize
/ hfsmp
->hfs_logical_block_size
),
3035 hfsmp
->blockSize
, 0, 0, BLK_META
);
3036 char* buf_ptr
= (char*) buf_dataptr (jib_buf
);
3038 /* Zero out the portion of the block that won't contain JIB data */
3039 memset (buf_ptr
, 0, hfsmp
->blockSize
);
3041 bcopy(&local_jib
, buf_ptr
, sizeof(local_jib
));
3042 if (buf_bwrite (jib_buf
)) {
3046 /* Force a flush track cache */
3047 hfs_flush(hfsmp
, HFS_FLUSH_CACHE
);
3049 /* Now proceed with full volume sync */
3050 hfs_sync(hfsmp
->hfs_mp
, MNT_WAIT
, context
);
3052 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
3053 (off_t
)name
[2], (off_t
)name
[3]);
3056 // XXXdbg - note that currently (Sept, 08) hfs_util does not support
3057 // enabling the journal on a separate device so it is safe
3058 // to just copy hfs_devvp here. If hfs_util gets the ability
3059 // to dynamically enable the journal on a separate device then
3060 // we will have to do the same thing as hfs_early_journal_init()
3061 // to locate and open the journal device.
3063 jvp
= hfsmp
->hfs_devvp
;
3064 jnl
= journal_create(jvp
, journal_byte_offset
, journal_size
,
3066 hfsmp
->hfs_logical_block_size
,
3069 hfs_sync_metadata
, hfsmp
->hfs_mp
,
3073 * Set up the trim callback function so that we can add
3074 * recently freed extents to the free extent cache once
3075 * the transaction that freed them is written to the
3079 journal_trim_set_callback(jnl
, hfs_trim_callback
, hfsmp
);
3082 printf("hfs: FAILED to create the journal!\n");
3086 hfs_lock_global (hfsmp
, HFS_EXCLUSIVE_LOCK
);
3089 * Flush all dirty metadata buffers.
3091 buf_flushdirtyblks(hfsmp
->hfs_devvp
, TRUE
, 0, "hfs_sysctl");
3092 buf_flushdirtyblks(hfsmp
->hfs_extents_vp
, TRUE
, 0, "hfs_sysctl");
3093 buf_flushdirtyblks(hfsmp
->hfs_catalog_vp
, TRUE
, 0, "hfs_sysctl");
3094 buf_flushdirtyblks(hfsmp
->hfs_allocation_vp
, TRUE
, 0, "hfs_sysctl");
3095 if (hfsmp
->hfs_attribute_vp
)
3096 buf_flushdirtyblks(hfsmp
->hfs_attribute_vp
, TRUE
, 0, "hfs_sysctl");
3098 HFSTOVCB(hfsmp
)->vcbJinfoBlock
= name
[1];
3099 HFSTOVCB(hfsmp
)->vcbAtrb
|= kHFSVolumeJournaledMask
;
3103 // save this off for the hack-y check in hfs_remove()
3104 hfsmp
->jnl_start
= (u_int32_t
)name
[2];
3105 hfsmp
->jnl_size
= (off_t
)((unsigned)name
[3]);
3106 hfsmp
->hfs_jnlinfoblkid
= jinfo_attr
.ca_fileid
;
3107 hfsmp
->hfs_jnlfileid
= jnl_attr
.ca_fileid
;
3109 vfs_setflags(hfsmp
->hfs_mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
3111 hfs_unlock_global (hfsmp
);
3112 hfs_flushvolumeheader(hfsmp
, HFS_FVH_WAIT
| HFS_FVH_WRITE_ALT
);
3117 fsid
.val
[0] = (int32_t)hfsmp
->hfs_raw_dev
;
3118 fsid
.val
[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp
));
3119 vfs_event_signal(&fsid
, VQ_UPDATE
, (intptr_t)NULL
);
3122 } else if (name
[0] == HFS_DISABLE_JOURNALING
) {
3123 // clear the journaling bit
3125 /* Only root can disable journaling */
3126 if (!kauth_cred_issuser(kauth_cred_get())) {
3130 hfsmp
= hfs_mount_from_cwd(context
);
3135 * Disabling journaling is disallowed on volumes with directory hard links
3136 * because we have not tested the relevant code path.
3138 if (hfsmp
->hfs_private_attr
[DIR_HARDLINKS
].ca_entries
!= 0){
3139 printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
3143 printf("hfs: disabling journaling for %s\n", hfsmp
->vcbVN
);
3145 hfs_lock_global (hfsmp
, HFS_EXCLUSIVE_LOCK
);
3147 // Lights out for you buddy!
3148 journal_close(hfsmp
->jnl
);
3151 hfs_close_jvp(hfsmp
);
3152 vfs_clearflags(hfsmp
->hfs_mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
3153 hfsmp
->jnl_start
= 0;
3154 hfsmp
->hfs_jnlinfoblkid
= 0;
3155 hfsmp
->hfs_jnlfileid
= 0;
3157 HFSTOVCB(hfsmp
)->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
3159 hfs_unlock_global (hfsmp
);
3161 hfs_flushvolumeheader(hfsmp
, HFS_FVH_WAIT
| HFS_FVH_WRITE_ALT
);
3166 fsid
.val
[0] = (int32_t)hfsmp
->hfs_raw_dev
;
3167 fsid
.val
[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp
));
3168 vfs_event_signal(&fsid
, VQ_UPDATE
, (intptr_t)NULL
);
3171 } else if (name
[0] == VFS_CTL_QUERY
) {
3172 #if TARGET_OS_IPHONE
3174 #else //!TARGET_OS_IPHONE
3175 struct sysctl_req
*req
;
3176 union union_vfsidctl vc
;
3180 req
= CAST_DOWN(struct sysctl_req
*, oldp
); /* we're new style vfs sysctl. */
3185 error
= SYSCTL_IN(req
, &vc
, proc_is64bit(p
)? sizeof(vc
.vc64
):sizeof(vc
.vc32
));
3186 if (error
) return (error
);
3188 mp
= vfs_getvfs(&vc
.vc32
.vc_fsid
); /* works for 32 and 64 */
3189 if (mp
== NULL
) return (ENOENT
);
3191 hfsmp
= VFSTOHFS(mp
);
3192 bzero(&vq
, sizeof(vq
));
3193 vq
.vq_flags
= hfsmp
->hfs_notification_conditions
;
3194 return SYSCTL_OUT(req
, &vq
, sizeof(vq
));;
3195 #endif // TARGET_OS_IPHONE
3196 } else if (name
[0] == HFS_REPLAY_JOURNAL
) {
3197 vnode_t devvp
= NULL
;
3202 device_fd
= name
[1];
3203 error
= file_vnode(device_fd
, &devvp
);
3207 error
= vnode_getwithref(devvp
);
3209 file_drop(device_fd
);
3212 error
= hfs_journal_replay(devvp
, context
);
3213 file_drop(device_fd
);
3217 #if DEBUG || TARGET_OS_OSX
3218 else if (name
[0] == HFS_ENABLE_RESIZE_DEBUG
) {
3219 if (!kauth_cred_issuser(kauth_cred_get())) {
3223 int old
= hfs_resize_debug
;
3225 int res
= UREPLACE(oldp
, oldlenp
, newp
, newlen
, hfs_resize_debug
);
3227 if (old
!= hfs_resize_debug
) {
3228 printf("hfs: %s resize debug\n",
3229 hfs_resize_debug
? "enabled" : "disabled");
3234 #endif // DEBUG || OSX
3240 * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support
3241 * the vn_getpath_ext. We use it to leverage the code below that updates
3242 * the origin list cache if necessary
3246 hfs_vfs_vget(struct mount
*mp
, ino64_t ino
, struct vnode
**vpp
, __unused vfs_context_t context
)
3250 struct hfsmount
*hfsmp
;
3252 hfsmp
= VFSTOHFS(mp
);
3254 error
= hfs_vget(hfsmp
, (cnid_t
)ino
, vpp
, 1, 0);
3259 * If the look-up was via the object ID (rather than the link ID),
3260 * then we make sure there's a parent here. We can't leave this
3261 * until hfs_vnop_getattr because if there's a problem getting the
3262 * parent at that point, all the caller will do is call
3263 * hfs_vfs_vget again and we'll end up in an infinite loop.
3266 cnode_t
*cp
= VTOC(*vpp
);
3268 if (ISSET(cp
->c_flag
, C_HARDLINK
) && ino
== cp
->c_fileid
) {
3269 hfs_lock_always(cp
, HFS_SHARED_LOCK
);
3271 if (!hfs_haslinkorigin(cp
)) {
3272 if (!hfs_lock_upgrade(cp
))
3273 hfs_lock_always(cp
, HFS_EXCLUSIVE_LOCK
);
3275 if (cp
->c_cnid
== cp
->c_fileid
) {
3277 * Descriptor is stale, so we need to refresh it. We
3278 * pick the first link.
3282 error
= hfs_first_link(hfsmp
, cp
, &link_id
);
3285 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
3286 error
= cat_findname(hfsmp
, link_id
, &cp
->c_desc
);
3287 hfs_systemfile_unlock(hfsmp
, lockflags
);
3290 // We'll use whatever link the descriptor happens to have
3294 hfs_savelinkorigin(cp
, cp
->c_parentcnid
);
3310 * Look up an HFS object by ID.
3312 * The object is returned with an iocount reference and the cnode locked.
3314 * If the object is a file then it will represent the data fork.
3317 hfs_vget(struct hfsmount
*hfsmp
, cnid_t cnid
, struct vnode
**vpp
, int skiplock
, int allow_deleted
)
3319 struct vnode
*vp
= NULLVP
;
3320 struct cat_desc cndesc
;
3321 struct cat_attr cnattr
;
3322 struct cat_fork cnfork
;
3323 u_int32_t linkref
= 0;
3326 /* Check for cnids that should't be exported. */
3327 if ((cnid
< kHFSFirstUserCatalogNodeID
) &&
3328 (cnid
!= kHFSRootFolderID
&& cnid
!= kHFSRootParentID
)) {
3331 /* Don't export our private directories. */
3332 if (cnid
== hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
||
3333 cnid
== hfsmp
->hfs_private_desc
[DIR_HARDLINKS
].cd_cnid
) {
3337 * Check the hash first
3339 vp
= hfs_chash_getvnode(hfsmp
, cnid
, 0, skiplock
, allow_deleted
);
3345 bzero(&cndesc
, sizeof(cndesc
));
3346 bzero(&cnattr
, sizeof(cnattr
));
3347 bzero(&cnfork
, sizeof(cnfork
));
3350 * Not in hash, lookup in catalog
3352 if (cnid
== kHFSRootParentID
) {
3353 static char hfs_rootname
[] = "/";
3355 cndesc
.cd_nameptr
= (const u_int8_t
*)&hfs_rootname
[0];
3356 cndesc
.cd_namelen
= 1;
3357 cndesc
.cd_parentcnid
= kHFSRootParentID
;
3358 cndesc
.cd_cnid
= kHFSRootFolderID
;
3359 cndesc
.cd_flags
= CD_ISDIR
;
3361 cnattr
.ca_fileid
= kHFSRootFolderID
;
3362 cnattr
.ca_linkcount
= 1;
3363 cnattr
.ca_entries
= 1;
3364 cnattr
.ca_dircount
= 1;
3365 cnattr
.ca_mode
= (S_IFDIR
| S_IRWXU
| S_IRWXG
| S_IRWXO
);
3369 const char *nameptr
;
3371 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
3372 error
= cat_idlookup(hfsmp
, cnid
, 0, 0, &cndesc
, &cnattr
, &cnfork
);
3373 hfs_systemfile_unlock(hfsmp
, lockflags
);
3381 * Check for a raw hardlink inode and save its linkref.
3383 pid
= cndesc
.cd_parentcnid
;
3384 nameptr
= (const char *)cndesc
.cd_nameptr
;
3386 if ((pid
== hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
) &&
3387 cndesc
.cd_namelen
> HFS_INODE_PREFIX_LEN
&&
3388 (bcmp(nameptr
, HFS_INODE_PREFIX
, HFS_INODE_PREFIX_LEN
) == 0)) {
3389 linkref
= strtoul(&nameptr
[HFS_INODE_PREFIX_LEN
], NULL
, 10);
3391 } else if ((pid
== hfsmp
->hfs_private_desc
[DIR_HARDLINKS
].cd_cnid
) &&
3392 cndesc
.cd_namelen
> HFS_DIRINODE_PREFIX_LEN
&&
3393 (bcmp(nameptr
, HFS_DIRINODE_PREFIX
, HFS_DIRINODE_PREFIX_LEN
) == 0)) {
3394 linkref
= strtoul(&nameptr
[HFS_DIRINODE_PREFIX_LEN
], NULL
, 10);
3396 } else if ((pid
== hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
) &&
3397 cndesc
.cd_namelen
> HFS_DELETE_PREFIX_LEN
&&
3398 (bcmp(nameptr
, HFS_DELETE_PREFIX
, HFS_DELETE_PREFIX_LEN
) == 0)) {
3400 cat_releasedesc(&cndesc
);
3401 return (ENOENT
); /* open unlinked file */
3406 * Finish initializing cnode descriptor for hardlinks.
3408 * We need a valid name and parent for reverse lookups.
3412 struct cat_desc linkdesc
;
3415 cnattr
.ca_linkref
= linkref
;
3416 bzero (&linkdesc
, sizeof (linkdesc
));
3419 * If the caller supplied the raw inode value, then we don't know exactly
3420 * which hardlink they wanted. It's likely that they acquired the raw inode
3421 * value BEFORE the item became a hardlink, in which case, they probably
3422 * want the oldest link. So request the oldest link from the catalog.
3424 * Unfortunately, this requires that we iterate through all N hardlinks. On the plus
3425 * side, since we know that we want the last linkID, we can also have this one
3426 * call give us back the name of the last ID, since it's going to have it in-hand...
3428 linkerr
= hfs_lookup_lastlink (hfsmp
, linkref
, &lastid
, &linkdesc
);
3429 if ((linkerr
== 0) && (lastid
!= 0)) {
3431 * Release any lingering buffers attached to our local descriptor.
3432 * Then copy the name and other business into the cndesc
3434 cat_releasedesc (&cndesc
);
3435 bcopy (&linkdesc
, &cndesc
, sizeof(linkdesc
));
3437 /* If it failed, the linkref code will just use whatever it had in-hand below. */
3441 int newvnode_flags
= 0;
3443 error
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
,
3444 &cnfork
, &vp
, &newvnode_flags
);
3446 VTOC(vp
)->c_flag
|= C_HARDLINK
;
3447 vnode_setmultipath(vp
);
3450 int newvnode_flags
= 0;
3452 void *buf
= hfs_malloc(MAXPATHLEN
);
3454 /* Supply hfs_getnewvnode with a component name. */
3455 struct componentname cn
= {
3456 .cn_nameiop
= LOOKUP
,
3457 .cn_flags
= ISLASTCN
,
3458 .cn_pnlen
= MAXPATHLEN
,
3459 .cn_namelen
= cndesc
.cd_namelen
,
3464 bcopy(cndesc
.cd_nameptr
, cn
.cn_nameptr
, cndesc
.cd_namelen
+ 1);
3466 error
= hfs_getnewvnode(hfsmp
, NULLVP
, &cn
, &cndesc
, 0, &cnattr
,
3467 &cnfork
, &vp
, &newvnode_flags
);
3469 if (error
== 0 && (VTOC(vp
)->c_flag
& C_HARDLINK
)) {
3470 hfs_savelinkorigin(VTOC(vp
), cndesc
.cd_parentcnid
);
3473 hfs_free(buf
, MAXPATHLEN
);
3475 cat_releasedesc(&cndesc
);
3478 if (vp
&& skiplock
) {
3479 hfs_unlock(VTOC(vp
));
3486 * Flush out all the files in a filesystem.
3490 hfs_flushfiles(struct mount
*mp
, int flags
, struct proc
*p
)
3492 hfs_flushfiles(struct mount
*mp
, int flags
, __unused
struct proc
*p
)
3495 struct hfsmount
*hfsmp
;
3496 struct vnode
*skipvp
= NULLVP
;
3498 int accounted_root_usecounts
;
3503 hfsmp
= VFSTOHFS(mp
);
3505 accounted_root_usecounts
= 0;
3508 * The open quota files have an indirect reference on
3509 * the root directory vnode. We must account for this
3510 * extra reference when doing the intial vflush.
3512 if (((unsigned int)vfs_flags(mp
)) & MNT_QUOTA
) {
3513 /* Find out how many quota files we have open. */
3514 for (i
= 0; i
< MAXQUOTAS
; i
++) {
3515 if (hfsmp
->hfs_qfiles
[i
].qf_vp
!= NULLVP
)
3516 ++accounted_root_usecounts
;
3521 if (accounted_root_usecounts
> 0) {
3522 /* Obtain the root vnode so we can skip over it. */
3523 skipvp
= hfs_chash_getvnode(hfsmp
, kHFSRootFolderID
, 0, 0, 0);
3526 error
= vflush(mp
, skipvp
, SKIPSYSTEM
| SKIPSWAP
| flags
);
3530 error
= vflush(mp
, skipvp
, SKIPSYSTEM
| flags
);
3534 * See if there are additional references on the
3535 * root vp besides the ones obtained from the open
3536 * quota files and CoreStorage.
3539 (vnode_isinuse(skipvp
, accounted_root_usecounts
))) {
3540 error
= EBUSY
; /* root directory is still open */
3542 hfs_unlock(VTOC(skipvp
));
3543 /* release the iocount from the hfs_chash_getvnode call above. */
3546 if (error
&& (flags
& FORCECLOSE
) == 0)
3550 if (((unsigned int)vfs_flags(mp
)) & MNT_QUOTA
) {
3551 for (i
= 0; i
< MAXQUOTAS
; i
++) {
3552 if (hfsmp
->hfs_qfiles
[i
].qf_vp
== NULLVP
)
3554 hfs_quotaoff(p
, mp
, i
);
3560 error
= vflush(mp
, NULLVP
, SKIPSYSTEM
| flags
);
/*
 * Update volume encoding bitmap (HFS Plus only)
 *
 * Mark a legacy text encoding as in-use (as needed)
 * in the volume header of this HFS+ filesystem.
 */
hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
#define kIndexMacUkrainian	48	/* MacUkrainian encoding is 152 */
#define kIndexMacFarsi		49	/* MacFarsi encoding is 140 */

	case kTextEncodingMacUkrainian:
		index = kIndexMacUkrainian;
	case kTextEncodingMacFarsi:
		index = kIndexMacFarsi;

	/* Only mark the encoding as in-use if it wasn't already set */
	if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
		hfs_lock_mount (hfsmp);
		hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
		MarkVCBDirty(hfsmp);
		hfs_unlock_mount(hfsmp);
/*
 * Update volume stats
 *
 * On journal volumes this will cause a volume header flush
 */
hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
	hfs_lock_mount (hfsmp);

	MarkVCBDirty(hfsmp);
	hfsmp->hfs_mtime = tv.tv_sec;

		if (hfsmp->hfs_dircount != 0xFFFFFFFF)
			++hfsmp->hfs_dircount;
		if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
			++hfsmp->vcbNmRtDirs;

		if (hfsmp->hfs_dircount != 0)
			--hfsmp->hfs_dircount;
		if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
			--hfsmp->vcbNmRtDirs;

		if (hfsmp->hfs_filecount != 0xFFFFFFFF)
			++hfsmp->hfs_filecount;
		if (inroot && hfsmp->vcbNmFls != 0xFFFF)

		if (hfsmp->hfs_filecount != 0)
			--hfsmp->hfs_filecount;
		if (inroot && hfsmp->vcbNmFls != 0xFFFF)

	hfs_unlock_mount (hfsmp);

		hfs_flushvolumeheader(hfsmp, 0);
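/*
 * The per-op counter updates above are presumably the arms of a switch on
 * `op` (directory add/remove, file add/remove); each one saturates at its
 * field's maximum rather than wrapping.  The trailing volume header flush
 * only matters on journaled volumes, per the function comment.
 */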
3658 /* HFS Standard MDB flush */
3660 hfs_flushMDB(struct hfsmount
*hfsmp
, int waitfor
, int altflush
)
3662 ExtendedVCB
*vcb
= HFSTOVCB(hfsmp
);
3663 struct filefork
*fp
;
3664 HFSMasterDirectoryBlock
*mdb
;
3665 struct buf
*bp
= NULL
;
3670 sector_size
= hfsmp
->hfs_logical_block_size
;
3671 retval
= (int)buf_bread(hfsmp
->hfs_devvp
, (daddr64_t
)HFS_PRI_SECTOR(sector_size
), sector_size
, NOCRED
, &bp
);
3678 hfs_lock_mount (hfsmp
);
3680 mdb
= (HFSMasterDirectoryBlock
*)(buf_dataptr(bp
) + HFS_PRI_OFFSET(sector_size
));
3682 mdb
->drCrDate
= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb
->hfs_itime
)));
3683 mdb
->drLsMod
= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb
->vcbLsMod
)));
3684 mdb
->drAtrb
= SWAP_BE16 (vcb
->vcbAtrb
);
3685 mdb
->drNmFls
= SWAP_BE16 (vcb
->vcbNmFls
);
3686 mdb
->drAllocPtr
= SWAP_BE16 (vcb
->nextAllocation
);
3687 mdb
->drClpSiz
= SWAP_BE32 (vcb
->vcbClpSiz
);
3688 mdb
->drNxtCNID
= SWAP_BE32 (vcb
->vcbNxtCNID
);
3689 mdb
->drFreeBks
= SWAP_BE16 (vcb
->freeBlocks
);
3691 namelen
= strlen((char *)vcb
->vcbVN
);
3692 retval
= utf8_to_hfs(vcb
, namelen
, vcb
->vcbVN
, mdb
->drVN
);
3693 /* Retry with MacRoman in case that's how it was exported. */
3695 retval
= utf8_to_mac_roman(namelen
, vcb
->vcbVN
, mdb
->drVN
);
3697 mdb
->drVolBkUp
= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb
->vcbVolBkUp
)));
3698 mdb
->drWrCnt
= SWAP_BE32 (vcb
->vcbWrCnt
);
3699 mdb
->drNmRtDirs
= SWAP_BE16 (vcb
->vcbNmRtDirs
);
3700 mdb
->drFilCnt
= SWAP_BE32 (vcb
->vcbFilCnt
);
3701 mdb
->drDirCnt
= SWAP_BE32 (vcb
->vcbDirCnt
);
3703 bcopy(vcb
->vcbFndrInfo
, mdb
->drFndrInfo
, sizeof(mdb
->drFndrInfo
));
3705 fp
= VTOF(vcb
->extentsRefNum
);
3706 mdb
->drXTExtRec
[0].startBlock
= SWAP_BE16 (fp
->ff_extents
[0].startBlock
);
3707 mdb
->drXTExtRec
[0].blockCount
= SWAP_BE16 (fp
->ff_extents
[0].blockCount
);
3708 mdb
->drXTExtRec
[1].startBlock
= SWAP_BE16 (fp
->ff_extents
[1].startBlock
);
3709 mdb
->drXTExtRec
[1].blockCount
= SWAP_BE16 (fp
->ff_extents
[1].blockCount
);
3710 mdb
->drXTExtRec
[2].startBlock
= SWAP_BE16 (fp
->ff_extents
[2].startBlock
);
3711 mdb
->drXTExtRec
[2].blockCount
= SWAP_BE16 (fp
->ff_extents
[2].blockCount
);
3712 mdb
->drXTFlSize
= SWAP_BE32 (fp
->ff_blocks
* vcb
->blockSize
);
3713 mdb
->drXTClpSiz
= SWAP_BE32 (fp
->ff_clumpsize
);
3714 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3716 fp
= VTOF(vcb
->catalogRefNum
);
3717 mdb
->drCTExtRec
[0].startBlock
= SWAP_BE16 (fp
->ff_extents
[0].startBlock
);
3718 mdb
->drCTExtRec
[0].blockCount
= SWAP_BE16 (fp
->ff_extents
[0].blockCount
);
3719 mdb
->drCTExtRec
[1].startBlock
= SWAP_BE16 (fp
->ff_extents
[1].startBlock
);
3720 mdb
->drCTExtRec
[1].blockCount
= SWAP_BE16 (fp
->ff_extents
[1].blockCount
);
3721 mdb
->drCTExtRec
[2].startBlock
= SWAP_BE16 (fp
->ff_extents
[2].startBlock
);
3722 mdb
->drCTExtRec
[2].blockCount
= SWAP_BE16 (fp
->ff_extents
[2].blockCount
);
3723 mdb
->drCTFlSize
= SWAP_BE32 (fp
->ff_blocks
* vcb
->blockSize
);
3724 mdb
->drCTClpSiz
= SWAP_BE32 (fp
->ff_clumpsize
);
3725 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3727 MarkVCBClean( vcb
);
3729 hfs_unlock_mount (hfsmp
);
3731 /* If requested, flush out the alternate MDB */
3733 struct buf
*alt_bp
= NULL
;
3735 if (buf_meta_bread(hfsmp
->hfs_devvp
, hfsmp
->hfs_partition_avh_sector
, sector_size
, NOCRED
, &alt_bp
) == 0) {
3736 bcopy(mdb
, (char *)buf_dataptr(alt_bp
) + HFS_ALT_OFFSET(sector_size
), kMDBSize
);
3738 (void) VNOP_BWRITE(alt_bp
);
3743 if (waitfor
!= MNT_WAIT
)
3746 retval
= VNOP_BWRITE(bp
);
3753 * Flush any dirty in-memory mount data to the on-disk
3756 * Note: the on-disk volume signature is intentionally
3757 * not flushed since the on-disk "H+" and "HX" signatures
3758 * are always stored in-memory as "H+".
3761 hfs_flushvolumeheader(struct hfsmount
*hfsmp
,
3762 hfs_flush_volume_header_options_t options
)
3764 ExtendedVCB
*vcb
= HFSTOVCB(hfsmp
);
3765 struct filefork
*fp
;
3766 HFSPlusVolumeHeader
*volumeHeader
, *altVH
;
3768 struct buf
*bp
, *alt_bp
;
3770 daddr64_t priIDSector
;
3771 bool critical
= false;
3772 u_int16_t signature
;
3773 u_int16_t hfsversion
;
3774 daddr64_t avh_sector
;
3775 bool altflush
= ISSET(options
, HFS_FVH_WRITE_ALT
);
3777 if (ISSET(options
, HFS_FVH_FLUSH_IF_DIRTY
)
3778 && !hfs_header_needs_flushing(hfsmp
)) {
3782 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
3786 if (hfsmp
->hfs_flags
& HFS_STANDARD
) {
3787 return hfs_flushMDB(hfsmp
, ISSET(options
, HFS_FVH_WAIT
) ? MNT_WAIT
: 0, altflush
);
3790 priIDSector
= (daddr64_t
)((vcb
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
3791 HFS_PRI_SECTOR(hfsmp
->hfs_logical_block_size
));
3793 if (hfs_start_transaction(hfsmp
) != 0) {
3800 retval
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
3801 HFS_PHYSBLK_ROUNDDOWN(priIDSector
, hfsmp
->hfs_log_per_phys
),
3802 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp
);
3804 printf("hfs: err %d reading VH blk (vol=%s)\n", retval
, vcb
->vcbVN
);
3808 volumeHeader
= (HFSPlusVolumeHeader
*)((char *)buf_dataptr(bp
) +
3809 HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
));
3812 * Sanity check what we just read. If it's bad, try the alternate
3815 signature
= SWAP_BE16 (volumeHeader
->signature
);
3816 hfsversion
= SWAP_BE16 (volumeHeader
->version
);
3817 if ((signature
!= kHFSPlusSigWord
&& signature
!= kHFSXSigWord
) ||
3818 (hfsversion
< kHFSPlusVersion
) || (hfsversion
> 100) ||
3819 (SWAP_BE32 (volumeHeader
->blockSize
) != vcb
->blockSize
)) {
3820 printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3821 vcb
->vcbVN
, signature
, hfsversion
,
3822 SWAP_BE32 (volumeHeader
->blockSize
));
3823 hfs_mark_inconsistent(hfsmp
, HFS_INCONSISTENCY_DETECTED
);
3825 /* Almost always we read AVH relative to the partition size */
3826 avh_sector
= hfsmp
->hfs_partition_avh_sector
;
3828 if (hfsmp
->hfs_partition_avh_sector
!= hfsmp
->hfs_fs_avh_sector
) {
3830 * The two altVH offsets do not match --- which means that a smaller file
3831 * system exists in a larger partition. Verify that we have the correct
3832 * alternate volume header sector as per the current parititon size.
3833 * The GPT device that we are mounted on top could have changed sizes
3834 * without us knowing.
3836 * We're in a transaction, so it's safe to modify the partition_avh_sector
3837 * field if necessary.
3840 uint64_t sector_count
;
3842 /* Get underlying device block count */
3843 if ((retval
= VNOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCGETBLOCKCOUNT
,
3844 (caddr_t
)§or_count
, 0, vfs_context_current()))) {
3845 printf("hfs_flushVH: err %d getting block count (%s) \n", retval
, vcb
->vcbVN
);
3850 /* Partition size was changed without our knowledge */
3851 if (sector_count
!= (uint64_t)hfsmp
->hfs_logical_block_count
) {
3852 hfsmp
->hfs_partition_avh_sector
= (hfsmp
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
3853 HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
, sector_count
);
3854 /* Note: hfs_fs_avh_sector will remain unchanged */
3855 printf ("hfs_flushVH: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
3856 hfsmp
->hfs_partition_avh_sector
, hfsmp
->hfs_fs_avh_sector
);
3859 * We just updated the offset for AVH relative to
3860 * the partition size, so the content of that AVH
3861 * will be invalid. But since we are also maintaining
3862 * a valid AVH relative to the file system size, we
3863 * can read it since primary VH and partition AVH
3866 avh_sector
= hfsmp
->hfs_fs_avh_sector
;
3870 printf ("hfs: trying alternate (for %s) avh_sector=%qu\n",
3871 (avh_sector
== hfsmp
->hfs_fs_avh_sector
) ? "file system" : "partition", avh_sector
);
3874 retval
= buf_meta_bread(hfsmp
->hfs_devvp
,
3875 HFS_PHYSBLK_ROUNDDOWN(avh_sector
, hfsmp
->hfs_log_per_phys
),
3876 hfsmp
->hfs_physical_block_size
, NOCRED
, &alt_bp
);
3878 printf("hfs: err %d reading alternate VH (%s)\n", retval
, vcb
->vcbVN
);
3882 altVH
= (HFSPlusVolumeHeader
*)((char *)buf_dataptr(alt_bp
) +
3883 HFS_ALT_OFFSET(hfsmp
->hfs_physical_block_size
));
3884 signature
= SWAP_BE16(altVH
->signature
);
3885 hfsversion
= SWAP_BE16(altVH
->version
);
3887 if ((signature
!= kHFSPlusSigWord
&& signature
!= kHFSXSigWord
) ||
3888 (hfsversion
< kHFSPlusVersion
) || (kHFSPlusVersion
> 100) ||
3889 (SWAP_BE32(altVH
->blockSize
) != vcb
->blockSize
)) {
3890 printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3891 vcb
->vcbVN
, signature
, hfsversion
,
3892 SWAP_BE32(altVH
->blockSize
));
3897 /* The alternate is plausible, so use it. */
3898 bcopy(altVH
, volumeHeader
, kMDBSize
);
3902 /* No alternate VH, nothing more we can do. */
3909 journal_modify_block_start(hfsmp
->jnl
, bp
);
3913 * For embedded HFS+ volumes, update create date if it changed
3914 * (ie from a setattrlist call)
3916 if ((vcb
->hfsPlusIOPosOffset
!= 0) &&
3917 (SWAP_BE32 (volumeHeader
->createDate
) != vcb
->localCreateDate
)) {
3919 HFSMasterDirectoryBlock
*mdb
;
3921 retval
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
3922 HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp
->hfs_logical_block_size
), hfsmp
->hfs_log_per_phys
),
3923 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp2
);
3929 mdb
= (HFSMasterDirectoryBlock
*)(buf_dataptr(bp2
) +
3930 HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
));
3932 if ( SWAP_BE32 (mdb
->drCrDate
) != vcb
->localCreateDate
)
3935 journal_modify_block_start(hfsmp
->jnl
, bp2
);
3938 mdb
->drCrDate
= SWAP_BE32 (vcb
->localCreateDate
); /* pick up the new create date */
3941 journal_modify_block_end(hfsmp
->jnl
, bp2
, NULL
, NULL
);
3943 (void) VNOP_BWRITE(bp2
); /* write out the changes */
3948 buf_brelse(bp2
); /* just release it */
3953 hfs_lock_mount (hfsmp
);
3955 /* Note: only update the lower 16 bits worth of attributes */
3956 volumeHeader
->attributes
= SWAP_BE32 (vcb
->vcbAtrb
);
3957 volumeHeader
->journalInfoBlock
= SWAP_BE32 (vcb
->vcbJinfoBlock
);
3959 volumeHeader
->lastMountedVersion
= SWAP_BE32 (kHFSJMountVersion
);
3961 volumeHeader
->lastMountedVersion
= SWAP_BE32 (kHFSPlusMountVersion
);
3963 volumeHeader
->createDate
= SWAP_BE32 (vcb
->localCreateDate
); /* volume create date is in local time */
3964 volumeHeader
->modifyDate
= SWAP_BE32 (to_hfs_time(vcb
->vcbLsMod
));
3965 volumeHeader
->backupDate
= SWAP_BE32 (to_hfs_time(vcb
->vcbVolBkUp
));
3966 volumeHeader
->fileCount
= SWAP_BE32 (vcb
->vcbFilCnt
);
3967 volumeHeader
->folderCount
= SWAP_BE32 (vcb
->vcbDirCnt
);
3968 volumeHeader
->totalBlocks
= SWAP_BE32 (vcb
->totalBlocks
);
3969 volumeHeader
->freeBlocks
= SWAP_BE32 (vcb
->freeBlocks
+ vcb
->reclaimBlocks
);
3970 volumeHeader
->nextAllocation
= SWAP_BE32 (vcb
->nextAllocation
);
3971 volumeHeader
->rsrcClumpSize
= SWAP_BE32 (vcb
->vcbClpSiz
);
3972 volumeHeader
->dataClumpSize
= SWAP_BE32 (vcb
->vcbClpSiz
);
3973 volumeHeader
->nextCatalogID
= SWAP_BE32 (vcb
->vcbNxtCNID
);
3974 volumeHeader
->writeCount
= SWAP_BE32 (vcb
->vcbWrCnt
);
3975 volumeHeader
->encodingsBitmap
= SWAP_BE64 (vcb
->encodingsBitmap
);
3977 if (bcmp(vcb
->vcbFndrInfo
, volumeHeader
->finderInfo
, sizeof(volumeHeader
->finderInfo
)) != 0) {
3978 bcopy(vcb
->vcbFndrInfo
, volumeHeader
->finderInfo
, sizeof(volumeHeader
->finderInfo
));
3982 if (!altflush
&& !ISSET(options
, HFS_FVH_FLUSH_IF_DIRTY
)) {
3986 /* Sync Extents over-flow file meta data */
3987 fp
= VTOF(vcb
->extentsRefNum
);
3988 if (FTOC(fp
)->c_flag
& C_MODIFIED
) {
3989 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
3990 volumeHeader
->extentsFile
.extents
[i
].startBlock
=
3991 SWAP_BE32 (fp
->ff_extents
[i
].startBlock
);
3992 volumeHeader
->extentsFile
.extents
[i
].blockCount
=
3993 SWAP_BE32 (fp
->ff_extents
[i
].blockCount
);
3995 volumeHeader
->extentsFile
.logicalSize
= SWAP_BE64 (fp
->ff_size
);
3996 volumeHeader
->extentsFile
.totalBlocks
= SWAP_BE32 (fp
->ff_blocks
);
3997 volumeHeader
->extentsFile
.clumpSize
= SWAP_BE32 (fp
->ff_clumpsize
);
3998 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
4002 /* Sync Catalog file meta data */
4003 fp
= VTOF(vcb
->catalogRefNum
);
4004 if (FTOC(fp
)->c_flag
& C_MODIFIED
) {
4005 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
4006 volumeHeader
->catalogFile
.extents
[i
].startBlock
=
4007 SWAP_BE32 (fp
->ff_extents
[i
].startBlock
);
4008 volumeHeader
->catalogFile
.extents
[i
].blockCount
=
4009 SWAP_BE32 (fp
->ff_extents
[i
].blockCount
);
4011 volumeHeader
->catalogFile
.logicalSize
= SWAP_BE64 (fp
->ff_size
);
4012 volumeHeader
->catalogFile
.totalBlocks
= SWAP_BE32 (fp
->ff_blocks
);
4013 volumeHeader
->catalogFile
.clumpSize
= SWAP_BE32 (fp
->ff_clumpsize
);
4014 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
	/* Sync Allocation file meta data */
	fp = VTOF(vcb->allocationsRefNum);
	if (FTOC(fp)->c_flag & C_MODIFIED) {
		for (i = 0; i < kHFSPlusExtentDensity; i++) {
			volumeHeader->allocationFile.extents[i].startBlock =
				SWAP_BE32 (fp->ff_extents[i].startBlock);
			volumeHeader->allocationFile.extents[i].blockCount =
				SWAP_BE32 (fp->ff_extents[i].blockCount);
		}
		volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size);
		volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
		volumeHeader->allocationFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
		FTOC(fp)->c_flag &= ~C_MODIFIED;
		altflush = true;
	}
	/* Sync Attribute file meta data */
	if (hfsmp->hfs_attribute_vp) {
		fp = VTOF(hfsmp->hfs_attribute_vp);
		for (i = 0; i < kHFSPlusExtentDensity; i++) {
			volumeHeader->attributesFile.extents[i].startBlock =
				SWAP_BE32 (fp->ff_extents[i].startBlock);
			volumeHeader->attributesFile.extents[i].blockCount =
				SWAP_BE32 (fp->ff_extents[i].blockCount);
		}
		if (ISSET(FTOC(fp)->c_flag, C_MODIFIED)) {
			FTOC(fp)->c_flag &= ~C_MODIFIED;
			altflush = true;
		}
		volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size);
		volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
		volumeHeader->attributesFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
	}
	/* Sync Startup file meta data */
	if (hfsmp->hfs_startup_vp) {
		fp = VTOF(hfsmp->hfs_startup_vp);
		if (FTOC(fp)->c_flag & C_MODIFIED) {
			for (i = 0; i < kHFSPlusExtentDensity; i++) {
				volumeHeader->startupFile.extents[i].startBlock =
					SWAP_BE32 (fp->ff_extents[i].startBlock);
				volumeHeader->startupFile.extents[i].blockCount =
					SWAP_BE32 (fp->ff_extents[i].blockCount);
			}
			volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size);
			volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
			volumeHeader->startupFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
			FTOC(fp)->c_flag &= ~C_MODIFIED;
			altflush = true;
		}
	}

done:
	MarkVCBClean(hfsmp);
	hfs_unlock_mount(hfsmp);
	/* If requested, flush out the alternate volume header */
	if (altflush) {
		/*
		 * The two altVH offsets do not match --- which means that a smaller file
		 * system exists in a larger partition.  Verify that we have the correct
		 * alternate volume header sector as per the current partition size.
		 * The GPT device that we are mounted on top of could have changed sizes
		 * without us knowing.
		 *
		 * We're in a transaction, so it's safe to modify the partition_avh_sector
		 * field if necessary.
		 */
		if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
			uint64_t sector_count;

			/* Get underlying device block count */
			if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
					(caddr_t)&sector_count, 0, vfs_context_current()))) {
				printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
				retval = ENXIO;
				goto err_exit;
			}

			/* Partition size was changed without our knowledge */
			if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
				hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
						HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
				/* Note: hfs_fs_avh_sector will remain unchanged */
				printf("hfs_flushVH: altflush: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
						hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
			}
		}
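		/*
		 * Documentation-only sketch of the arithmetic above: an alternate volume
		 * header (AVH) lives 1024 bytes before the end of the space it describes,
		 * so the file-system copy and the partition copy coincide only when the
		 * file system fills the partition.  The helper below is hypothetical and
		 * assumes sizes that divide evenly; the real code uses HFS_ALT_SECTOR().
		 */
#if 0	/* illustrative sketch, not compiled */
		static uint64_t
		example_avh_sector(uint64_t embed_offset_bytes,	/* hfsPlusIOPosOffset */
				   uint32_t logical_block_size,		/* hfs_logical_block_size */
				   uint64_t logical_block_count)	/* FS or device block count */
		{
			uint64_t total_bytes = logical_block_count * (uint64_t)logical_block_size;

			/* The AVH sits in the logical block starting 1024 bytes before the end. */
			return (embed_offset_bytes / logical_block_size) +
			       ((total_bytes - 1024) / logical_block_size);
		}
#endif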
		/*
		 * First see if we need to write I/O to the "secondary" AVH
		 * located at FS Size - 1024 bytes, because this one will
		 * always go into the journal.  We put this AVH into the journal
		 * because even if the filesystem size has shrunk, this LBA should be
		 * reachable after the partition-size modification has occurred.
		 * The one where we need to be careful is partitionsize-1024, since the
		 * partition size should hopefully shrink.
		 *
		 * Most of the time this block will not execute.
		 */
		if ((hfsmp->hfs_fs_avh_sector) &&
		    (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
			if (buf_meta_bread(hfsmp->hfs_devvp,
					HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
					hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
				if (hfsmp->jnl) {
					journal_modify_block_start(hfsmp->jnl, alt_bp);
				}

				bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
						HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
						kMDBSize);

				if (hfsmp->jnl) {
					journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
				} else {
					(void) VNOP_BWRITE(alt_bp);
				}
			} else if (alt_bp) {
				buf_brelse(alt_bp);
			}
		}
		/*
		 * Flush out alternate volume header located at 1024 bytes before
		 * end of the partition as part of journal transaction.  In
		 * most cases, this will be the only alternate volume header
		 * that we need to worry about because the file system size is
		 * same as the partition size, therefore hfs_fs_avh_sector is
		 * same as hfs_partition_avh_sector.  This is the "priority" AVH.
		 *
		 * However, do not always put this I/O into the journal.  If we skipped the
		 * FS-Size AVH write above, then we will put this I/O into the journal as
		 * that indicates the two were in sync.  However, if the FS size is
		 * not the same as the partition size, we are tracking two.  We don't
		 * put it in the journal in that case, since if the partition
		 * size changes between uptimes, and we need to replay the journal,
		 * this I/O could generate an EIO if during replay it is now trying
		 * to access blocks beyond the device EOF.
		 */
		if (hfsmp->hfs_partition_avh_sector) {
			if (buf_meta_bread(hfsmp->hfs_devvp,
					HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
					hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {

				/* only one AVH, put this I/O in the journal. */
				if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
					journal_modify_block_start(hfsmp->jnl, alt_bp);
				}

				bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
						HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
						kMDBSize);

				/* If journaled and we only have one AVH to track */
				if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
					journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
				} else {
					/*
					 * If we don't have a journal or there are two AVH's at the
					 * moment, then this one doesn't go in the journal.  Note that
					 * this one may generate I/O errors, since the partition
					 * can be resized behind our backs at any moment and this I/O
					 * may now appear to be beyond the device EOF.
					 */
					(void) VNOP_BWRITE(alt_bp);
					hfs_flush(hfsmp, HFS_FLUSH_CACHE);
				}
			} else if (alt_bp) {
				buf_brelse(alt_bp);
			}
		}
	}
	/* Finish modifying the block for the primary VH */
	if (hfsmp->jnl) {
		journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
	} else {
		if (!ISSET(options, HFS_FVH_WAIT)) {
			buf_bawrite(bp);
		} else {
			retval = VNOP_BWRITE(bp);
			/* When critical data changes, flush the device cache */
			if (critical && (retval == 0)) {
				hfs_flush(hfsmp, HFS_FLUSH_CACHE);
			}
		}
	}

	hfs_end_transaction(hfsmp);

	return (retval);

err_exit:
	buf_brelse(bp);
	hfs_end_transaction(hfsmp);
	return retval;
}
/*
 * Creates a UUID from a unique "name" in the HFS UUID Name space.
 * See version 3 UUID.
 */
void
hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result_uuid)
{
	if (uuid_is_null(hfsmp->hfs_full_uuid)) {
		uuid_t result;
		MD5_CTX  md5c;
		uint8_t  rawUUID[8];

		((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6];
		((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7];

		MD5Init( &md5c );
		MD5Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) );
		MD5Update( &md5c, rawUUID, sizeof (rawUUID) );
		MD5Final( result, &md5c );

		result[6] = 0x30 | ( result[6] & 0x0F );
		result[8] = 0x80 | ( result[8] & 0x3F );

		uuid_copy(hfsmp->hfs_full_uuid, result);
	}
	uuid_copy(result_uuid, hfsmp->hfs_full_uuid);
}
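/*
 * Documentation-only sketch: the two masking statements above stamp the
 * RFC 4122 "version 3" (name-based, MD5) and variant bits onto the raw MD5
 * digest, which is what makes the result a well-formed v3 UUID.  The helper
 * name below is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_stamp_v3_uuid(uint8_t digest[16])
{
	digest[6] = 0x30 | (digest[6] & 0x0F);	/* high nibble of byte 6 = version 3 */
	digest[8] = 0x80 | (digest[8] & 0x3F);	/* top bits of byte 8 = RFC 4122 variant */
}
#endif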
/*
 * Get file system attributes.
 */
int
hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
{
#define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST | ATTR_FILE_CLUMPSIZE))
#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_DATA_PROTECT_FLAGS))

	ExtendedVCB *vcb = VFSTOVCB(mp);
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	int searchfs_on = 0;
	int exchangedata_on = 1;

#if CONFIG_SEARCHFS
	searchfs_on = 1;
#endif

#if CONFIG_PROTECT
	if (cp_fs_protected(mp)) {
		exchangedata_on = 0;
	}
#endif

	/*
	 * Some of these attributes can be expensive to query if we're
	 * backed by a disk image; hfs_freeblks() has to ask the backing
	 * store, and this might involve a trip to a network file server.
	 * Only ask for them if the caller really wants them.  Preserve old
	 * behavior for file systems not backed by a disk image.
	 */
#if HFS_SPARSE_DEV
	const int diskimage = (hfsmp->hfs_backingvp != NULL);
#else
	const int diskimage = 0;
#endif
	VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
	VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
	VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
	VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
	if (VFSATTR_WANTED(fsap, f_bfree) || !diskimage) {
		VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
	}
	if (VFSATTR_WANTED(fsap, f_bavail) || !diskimage) {
		VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
	}
	VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
	/* XXX needs clarification */
	if (VFSATTR_WANTED(fsap, f_bused) || !diskimage) {
		VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
	}
	VFSATTR_RETURN(fsap, f_files, (u_int64_t)HFS_MAX_FILES);
	VFSATTR_RETURN(fsap, f_ffree, (u_int64_t)hfs_free_cnids(hfsmp));

	fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
	fsap->f_fsid.val[1] = vfs_typenum(mp);
	VFSATTR_SET_SUPPORTED(fsap, f_fsid);

	VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
	VFSATTR_RETURN(fsap, f_carbon_fsid, 0);
	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
		vol_capabilities_attr_t *cap;

		cap = &fsap->f_capabilities;

		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			/* HFS+ & variants */
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_SYMBOLICLINKS |
				VOL_CAP_FMT_HARDLINKS |
				VOL_CAP_FMT_JOURNAL |
				VOL_CAP_FMT_ZERO_RUNS |
				(hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
				(hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_2TB_FILESIZE |
				VOL_CAP_FMT_HIDDEN_FILES |
#if HFS_COMPRESSION
				VOL_CAP_FMT_DECMPFS_COMPRESSION |
#endif
#if CONFIG_HFS_DIRLINK
				VOL_CAP_FMT_DIR_HARDLINKS |
#endif
#ifdef VOL_CAP_FMT_DOCUMENT_ID
				VOL_CAP_FMT_DOCUMENT_ID |
#endif /* VOL_CAP_FMT_DOCUMENT_ID */
#ifdef VOL_CAP_FMT_WRITE_GENERATION_COUNT
				VOL_CAP_FMT_WRITE_GENERATION_COUNT |
#endif /* VOL_CAP_FMT_WRITE_GENERATION_COUNT */
				VOL_CAP_FMT_PATH_FROM_ID;
		} else {
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_HIDDEN_FILES |
				VOL_CAP_FMT_PATH_FROM_ID;
		}
		/*
		 * The capabilities word in 'cap' tell you whether or not
		 * this particular filesystem instance has feature X enabled.
		 */
		cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
#if VOL_CAP_INT_RENAME_EXCL
			VOL_CAP_INT_RENAME_EXCL |
#endif
#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS may conditionally support searchfs and exchangedata depending on the runtime */
		if (searchfs_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_SEARCHFS;
		}
		if (exchangedata_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXCHANGEDATA;
		}

		cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
		cap->valid[VOL_CAPABILITIES_FORMAT] =
			VOL_CAP_FMT_PERSISTENTOBJECTIDS |
			VOL_CAP_FMT_SYMBOLICLINKS |
			VOL_CAP_FMT_HARDLINKS |
			VOL_CAP_FMT_JOURNAL |
			VOL_CAP_FMT_JOURNAL_ACTIVE |
			VOL_CAP_FMT_NO_ROOT_TIMES |
			VOL_CAP_FMT_SPARSE_FILES |
			VOL_CAP_FMT_ZERO_RUNS |
			VOL_CAP_FMT_CASE_SENSITIVE |
			VOL_CAP_FMT_CASE_PRESERVING |
			VOL_CAP_FMT_FAST_STATFS |
			VOL_CAP_FMT_2TB_FILESIZE |
			VOL_CAP_FMT_OPENDENYMODES |
			VOL_CAP_FMT_HIDDEN_FILES |
			VOL_CAP_FMT_PATH_FROM_ID |
			VOL_CAP_FMT_DECMPFS_COMPRESSION |
#ifdef VOL_CAP_FMT_DOCUMENT_ID
			VOL_CAP_FMT_DOCUMENT_ID |
#endif /* VOL_CAP_FMT_DOCUMENT_ID */
#ifdef VOL_CAP_FMT_WRITE_GENERATION_COUNT
			VOL_CAP_FMT_WRITE_GENERATION_COUNT |
#endif /* VOL_CAP_FMT_WRITE_GENERATION_COUNT */
			VOL_CAP_FMT_DIR_HARDLINKS;
		/*
		 * Bits in the "valid" field tell you whether or not the on-disk
		 * format supports feature X.
		 */
		cap->valid[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_COPYFILE |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
			VOL_CAP_INT_MANLOCK |
#if VOL_CAP_INT_RENAME_EXCL
			VOL_CAP_INT_RENAME_EXCL |
#endif
#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS always supports exchangedata and searchfs in the on-disk format natively */
		cap->valid[VOL_CAPABILITIES_INTERFACES] |= (VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_EXCHANGEDATA);

		cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
	}
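	/*
	 * Documentation-only sketch: the 'capabilities' and 'valid' words filled in
	 * above are what user space sees through getattrlist(2) with
	 * ATTR_VOL_CAPABILITIES.  A minimal user-space query might look like the
	 * following; the struct layout follows <sys/attr.h> and the helper name is
	 * illustrative.
	 */
#if 0	/* illustrative user-space sketch, not compiled into the kernel */
#include <sys/attr.h>
#include <unistd.h>
#include <stdio.h>

	static void
	example_print_vol_caps(const char *volume_path)
	{
		struct attrlist al = {
			.bitmapcount = ATTR_BIT_MAP_COUNT,
			.volattr     = ATTR_VOL_INFO | ATTR_VOL_CAPABILITIES,
		};
		struct {
			u_int32_t               length;
			vol_capabilities_attr_t caps;
		} __attribute__((aligned(4), packed)) buf;

		if (getattrlist(volume_path, &al, &buf, sizeof(buf), 0) == 0) {
			/* A feature is meaningful only where both 'valid' and 'capabilities' are set. */
			int active = buf.caps.capabilities[VOL_CAPABILITIES_FORMAT] &
			             buf.caps.valid[VOL_CAPABILITIES_FORMAT] &
			             VOL_CAP_FMT_JOURNAL_ACTIVE;
			printf("journal active: %s\n", active ? "yes" : "no");
		}
	}
#endif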
	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
		vol_attributes_attr_t *attrp = &fsap->f_attributes;

		attrp->validattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
#if CONFIG_PROTECT
		attrp->validattr.commonattr |= ATTR_CMN_DATA_PROTECT_FLAGS;
#endif // CONFIG_PROTECT

		attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->validattr.forkattr = 0;

		attrp->nativeattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
#if CONFIG_PROTECT
		attrp->nativeattr.commonattr |= ATTR_CMN_DATA_PROTECT_FLAGS;
#endif // CONFIG_PROTECT

		attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->nativeattr.forkattr = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
	}
	fsap->f_create_time.tv_sec = hfsmp->hfs_itime;
	fsap->f_create_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_create_time);
	fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
	fsap->f_modify_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_modify_time);
	// We really don't have volume access time, they should check the root node, fake it up
	if (VFSATTR_IS_ACTIVE(fsap, f_access_time)) {
		struct timeval tv;

		microtime(&tv);
		fsap->f_access_time.tv_sec = tv.tv_sec;
		fsap->f_access_time.tv_nsec = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_access_time);
	}

	fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
	fsap->f_backup_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_backup_time);
	if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
		u_int16_t subtype = 0;

		/*
		 * Subtypes (flavors) for HFS
		 *   0:   Mac OS Extended
		 *   1:   Mac OS Extended (Journaled)
		 *   2:   Mac OS Extended (Case Sensitive)
		 *   3:   Mac OS Extended (Case Sensitive, Journaled)
		 *   128: Mac OS Standard
		 */
		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			if (hfsmp->jnl) {
				subtype |= HFS_SUBTYPE_JOURNALED;
			}
			if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
				subtype |= HFS_SUBTYPE_CASESENSITIVE;
			}
		} else {
			subtype = HFS_SUBTYPE_STANDARDHFS;
		}
		fsap->f_fssubtype = subtype;
		VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
	}
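	/*
	 * Documentation-only sketch of the subtype table above: journaling and case
	 * sensitivity combine as independent bits for the HFS+ flavors, while plain
	 * HFS reports 128.  The helper below is hypothetical and simply restates
	 * that table.
	 */
#if 0	/* illustrative sketch, not compiled */
	static u_int16_t
	example_hfs_subtype(int journaled, int case_sensitive, int is_hfs_standard)
	{
		if (is_hfs_standard)
			return 128;				/* Mac OS Standard */
		return (u_int16_t)((journaled ? 1 : 0) |	/* Journaled */
				   (case_sensitive ? 2 : 0));	/* Case Sensitive */
	}
#endif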
	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_uuid)) {
		hfs_getvoluuid(hfsmp, fsap->f_uuid);
		VFSATTR_SET_SUPPORTED(fsap, f_uuid);
	}
	return (0);
}
/*
 * Perform a volume rename.  Requires the FS' root vp.
 */
int
hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
{
	ExtendedVCB *vcb = VTOVCB(vp);
	struct cnode *cp = VTOC(vp);
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct cat_desc to_desc;
	struct cat_desc todir_desc;
	struct cat_desc new_desc;
	cat_cookie_t cookie;
	int lockflags;
	int error = 0;
	char converted_volname[256];
	size_t volname_length = 0;
	size_t conv_volname_length = 0;

	/*
	 * Ignore attempts to rename a volume to a zero-length name.
	 */
	if (name[0] == 0)
		return(0);

	bzero(&to_desc, sizeof(to_desc));
	bzero(&todir_desc, sizeof(todir_desc));
	bzero(&new_desc, sizeof(new_desc));
	bzero(&cookie, sizeof(cookie));

	todir_desc.cd_parentcnid = kHFSRootParentID;
	todir_desc.cd_cnid = kHFSRootFolderID;
	todir_desc.cd_flags = CD_ISDIR;

	to_desc.cd_nameptr = (const u_int8_t *)name;
	to_desc.cd_namelen = strlen(name);
	to_desc.cd_parentcnid = kHFSRootParentID;
	to_desc.cd_cnid = cp->c_cnid;
	to_desc.cd_flags = CD_ISDIR;
	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
		if ((error = hfs_start_transaction(hfsmp)) == 0) {
			if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

				error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);

				/*
				 * If successful, update the name in the VCB, ensure it's terminated.
				 */
				if (error == 0) {
					strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));

					volname_length = strlen((const char *)vcb->vcbVN);
					/* Send the volume name down to CoreStorage if necessary */
					error = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t *)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED);
					if (error == 0) {
						(void) VNOP_IOCTL(hfsmp->hfs_devvp, _DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current());
					}
					error = 0;
				}

				hfs_systemfile_unlock(hfsmp, lockflags);
				cat_postflight(hfsmp, &cookie, p);

				(void) hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			}
			hfs_end_transaction(hfsmp);
		}
		if (error == 0) {
			/* Release old allocated name buffer */
			if (cp->c_desc.cd_flags & CD_HASBUF) {
				const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;

				cp->c_desc.cd_nameptr = 0;
				cp->c_desc.cd_namelen = 0;
				cp->c_desc.cd_flags &= ~CD_HASBUF;
				vfs_removename(tmp_name);
			}
			/* Update cnode's catalog descriptor */
			replace_desc(cp, &new_desc);
			vcb->volumeNameEncodingHint = new_desc.cd_encoding;
			cp->c_touch_chgtime = TRUE;
		}
		hfs_unlock(cp);
	}

	return(error);
}
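/*
 * Documentation-only sketch: hfs_rename_volume() is normally reached from user
 * space via setattrlist(2) with ATTR_VOL_NAME, which arrives here through
 * hfs_vfs_setattr() below.  The buffer layout in this sketch follows
 * <sys/attr.h>; the helper name is illustrative.
 */
#if 0	/* illustrative user-space sketch, not compiled into the kernel */
#include <sys/attr.h>
#include <string.h>
#include <unistd.h>

static int
example_rename_volume(const char *mount_point, const char *new_name)
{
	struct attrlist al = {
		.bitmapcount = ATTR_BIT_MAP_COUNT,
		.volattr     = ATTR_VOL_INFO | ATTR_VOL_NAME,
	};
	struct {
		attrreference_t name_ref;
		char            name[256];
	} buf;

	/* The attribute buffer holds a reference record followed by the new name. */
	buf.name_ref.attr_dataoffset = sizeof(attrreference_t);
	buf.name_ref.attr_length     = (u_int32_t)strlen(new_name) + 1;
	strlcpy(buf.name, new_name, sizeof(buf.name));

	return setattrlist(mount_point, &al, &buf, sizeof(buf), 0);
}
#endif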
/*
 * Set file system attributes.
 */
static int
hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, vfs_context_t context)
{
	kauth_cred_t cred = vfs_context_ucred(context);
	int error = 0;

	/*
	 * Must be superuser or owner of filesystem to change volume attributes
	 */
	if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
		return (EACCES);

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		vnode_t root_vp;

		error = hfs_vfs_root(mp, &root_vp, context);
		if (error)
			goto out;

		error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
		(void) vnode_put(root_vp);
		if (error)
			goto out;

		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
out:
	return error;
}
/* If a runtime corruption is detected, set the volume inconsistent
 * bit in the volume attributes.  The volume inconsistent bit is a persistent
 * bit which represents that the volume is corrupt and needs repair.
 * The volume inconsistent bit can be set from the kernel when it detects
 * runtime corruption or from file system repair utilities like fsck_hfs when
 * a repair operation fails.  The bit should be cleared only from file system
 * verify/repair utility like fsck_hfs when a verify/repair succeeds.
 */
void hfs_mark_inconsistent(struct hfsmount *hfsmp,
			   hfs_inconsistency_reason_t reason)
{
	hfs_lock_mount(hfsmp);
	if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
		hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
		MarkVCBDirty(hfsmp);
	}
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
		switch (reason) {
		case HFS_INCONSISTENCY_DETECTED:
			printf("hfs_mark_inconsistent: Runtime corruption detected on %s, fsck will be forced on next mount.\n",
			       hfsmp->vcbVN);
			break;
		case HFS_ROLLBACK_FAILED:
			printf("hfs_mark_inconsistent: Failed to roll back; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
			       hfsmp->vcbVN);
			break;
		case HFS_OP_INCOMPLETE:
			printf("hfs_mark_inconsistent: Failed to complete operation; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
			       hfsmp->vcbVN);
			break;
		case HFS_FSCK_FORCED:
			printf("hfs_mark_inconsistent: fsck requested for `%s'; fsck will be forced on next mount.\n",
			       hfsmp->vcbVN);
			break;
		}
	}
	hfs_unlock_mount(hfsmp);
}
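/*
 * Documentation-only sketch: the "inconsistent" state is nothing more than
 * kHFSVolumeInconsistentMask in the volume attributes word, which is why it
 * survives reboots until a successful fsck_hfs clears it.  The hypothetical
 * helpers below only restate the bit manipulation used above.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_set_inconsistent(u_int32_t *vcb_attributes)
{
	*vcb_attributes |= kHFSVolumeInconsistentMask;	/* set by the kernel or a failed repair */
}

static void
example_clear_inconsistent(u_int32_t *vcb_attributes)
{
	*vcb_attributes &= ~kHFSVolumeInconsistentMask;	/* cleared only by a successful verify/repair */
}
#endif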
/* Replay the journal on the device node provided.  Returns zero if
 * journal replay succeeded or no journal was supposed to be replayed.
 */
static int hfs_journal_replay(vnode_t devvp, vfs_context_t context)
{
	int retval = 0;
	int error = 0;

	/* Replay allowed only on raw devices */
	if (!vnode_ischr(devvp) && !vnode_isblk(devvp))
		return EINVAL;

	retval = hfs_mountfs(devvp, NULL, NULL, /* journal_replay_only: */ 1, context);
	buf_flushdirtyblks(devvp, TRUE, 0, "hfs_journal_replay");

	/* FSYNC the devnode to be sure all data has been flushed */
	error = VNOP_FSYNC(devvp, MNT_WAIT, context);
	if (error) {
		retval = error;
	}

	return retval;
}
void
hfs_syncer_free(struct hfsmount *hfsmp)
{
	if (hfsmp && ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)) {
		hfs_syncer_lock(hfsmp);
		CLR(hfsmp->hfs_flags, HFS_RUN_SYNCER);
		hfs_syncer_unlock(hfsmp);

		// Wait for the syncer thread to finish
		if (hfsmp->hfs_syncer_thread) {
			hfs_syncer_wakeup(hfsmp);
			hfs_syncer_lock(hfsmp);
			while (hfsmp->hfs_syncer_thread)
				hfs_syncer_wait(hfsmp, NULL);
			hfs_syncer_unlock(hfsmp);
		}
	}
}
static int hfs_vfs_ioctl(struct mount *mp, u_long command, caddr_t data,
			 __unused int flags, __unused vfs_context_t context)
{
	switch (command) {
#if CONFIG_PROTECT
	case FIODEVICELOCKED:
		cp_device_locked_callback(mp, (cp_lock_state_t)data);
		return 0;
#endif
	}
	return ENOTTY;
}
/*
 * hfs vfs operations.
 */
const struct vfsops hfs_vfsops = {
	.vfs_mount    = hfs_mount,
	.vfs_start    = hfs_start,
	.vfs_unmount  = hfs_unmount,
	.vfs_root     = hfs_vfs_root,
	.vfs_quotactl = hfs_quotactl,
	.vfs_getattr  = hfs_vfs_getattr,
	.vfs_sync     = hfs_sync,
	.vfs_vget     = hfs_vfs_vget,
	.vfs_fhtovp   = hfs_fhtovp,
	.vfs_vptofh   = hfs_vptofh,
	.vfs_init     = hfs_init,
	.vfs_sysctl   = hfs_sysctl,
	.vfs_setattr  = hfs_vfs_setattr,
	.vfs_ioctl    = hfs_vfs_ioctl,