2 * Copyright (c) 1999-2012 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1991, 1993, 1994
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * derived from @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
68 * (c) Copyright 1997-2002 Apple Computer, Inc. All rights reserved.
70 * hfs_vfsops.c -- VFS layer for loadable HFS file system.
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kauth.h>
78 #include <sys/ubc_internal.h>
79 #include <sys/vnode_internal.h>
80 #include <sys/mount_internal.h>
81 #include <sys/sysctl.h>
82 #include <sys/malloc.h>
84 #include <sys/quota.h>
86 #include <sys/paths.h>
87 #include <sys/utfconv.h>
88 #include <sys/kdebug.h>
89 #include <sys/fslog.h>
92 #include <kern/locks.h>
94 #include <vfs/vfs_journal.h>
96 #include <miscfs/specfs/specdev.h>
97 #include <hfs/hfs_mount.h>
99 #include <libkern/crypto/md5.h>
100 #include <uuid/uuid.h>
103 #include "hfs_catalog.h"
104 #include "hfs_cnode.h"
106 #include "hfs_endian.h"
107 #include "hfs_hotfiles.h"
108 #include "hfs_quota.h"
109 #include "hfs_btreeio.h"
111 #include "hfscommon/headers/FileMgrInternal.h"
112 #include "hfscommon/headers/BTreesInternal.h"
115 #include <sys/cprotect.h>
118 #if CONFIG_HFS_ALLOC_RBTREE
119 #include "hfscommon/headers/HybridAllocator.h"
122 #define HFS_MOUNT_DEBUG 1
/*
 * File-scope globals for the HFS VFS layer: a resize-debug switch, the
 * lock group/attribute handles consumed by the HFS locking primitives,
 * and externs for the vnode operation vectors defined elsewhere.
 *
 * NOTE(review): this extract is garbled — each original source line is
 * split across several physical lines and an embedded copy of the
 * original line numbering is fused into the text. Comments only; the
 * code text is left byte-identical.
 */
129 /* Enable/disable debugging code for live volume resizing */
130 int hfs_resize_debug
= 0;
/* Lock group/attribute handles used when initializing HFS mutexes,
 * rwlocks and spinlocks (presumably set up in hfs_init — TODO confirm,
 * the initializer is not visible in this extract). */
132 lck_grp_attr_t
* hfs_group_attr
;
133 lck_attr_t
* hfs_lock_attr
;
134 lck_grp_t
* hfs_mutex_group
;
135 lck_grp_t
* hfs_rwlock_group
;
136 lck_grp_t
* hfs_spinlock_group
;
/* Vnode operation vector descriptors, defined in another translation unit. */
138 extern struct vnodeopv_desc hfs_vnodeop_opv_desc
;
139 extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc
;
/*
 * Forward declarations for the VFS entry points and internal helpers
 * implemented in this file. Statics are private to this file; the
 * non-static ones (hfs_vfs_vget, hfs_mount, hfs_mountfs, hfs_reload,
 * hfs_statfs, hfs_sync, hfs_sysctl, hfs_unmount) are shared with other
 * HFS translation units.
 *
 * NOTE(review): extraction has split each declaration across multiple
 * physical lines; code text is untouched, comments only.
 */
141 /* not static so we can re-use in hfs_readwrite.c for build_path calls */
142 int hfs_vfs_vget(struct mount
*mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t context
);
144 static int hfs_changefs(struct mount
*mp
, struct hfs_mount_args
*args
);
145 static int hfs_fhtovp(struct mount
*mp
, int fhlen
, unsigned char *fhp
, struct vnode
**vpp
, vfs_context_t context
);
146 static int hfs_flushfiles(struct mount
*, int, struct proc
*);
147 static int hfs_flushMDB(struct hfsmount
*hfsmp
, int waitfor
, int altflush
);
148 static int hfs_getmountpoint(struct vnode
*vp
, struct hfsmount
**hfsmpp
);
149 static int hfs_init(struct vfsconf
*vfsp
);
150 static int hfs_vfs_root(struct mount
*mp
, struct vnode
**vpp
, vfs_context_t context
);
151 static int hfs_quotactl(struct mount
*, int, uid_t
, caddr_t
, vfs_context_t context
);
152 static int hfs_start(struct mount
*mp
, int flags
, vfs_context_t context
);
153 static int hfs_vptofh(struct vnode
*vp
, int *fhlenp
, unsigned char *fhp
, vfs_context_t context
);
/* Helpers used by the volume-resize (reclaim) path. */
154 static int hfs_file_extent_overlaps(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, struct HFSPlusCatalogFile
*filerec
);
155 static int hfs_journal_replay(vnode_t devvp
, vfs_context_t context
);
156 static int hfs_reclaimspace(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, u_int32_t reclaimblks
, vfs_context_t context
);
/* Red-black-tree allocator setup/teardown (CONFIG_HFS_ALLOC_RBTREE). */
158 void hfs_initialize_allocator (struct hfsmount
*hfsmp
);
159 int hfs_teardown_allocator (struct hfsmount
*hfsmp
);
/* Public entry points shared with other HFS files. */
161 int hfs_mount(struct mount
*mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t context
);
162 int hfs_mountfs(struct vnode
*devvp
, struct mount
*mp
, struct hfs_mount_args
*args
, int journal_replay_only
, vfs_context_t context
);
163 int hfs_reload(struct mount
*mp
);
164 int hfs_statfs(struct mount
*mp
, register struct vfsstatfs
*sbp
, vfs_context_t context
);
165 int hfs_sync(struct mount
*mp
, int waitfor
, vfs_context_t context
);
166 int hfs_sysctl(int *name
, u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
167 user_addr_t newp
, size_t newlen
, vfs_context_t context
);
168 int hfs_unmount(struct mount
*mp
, int mntflags
, vfs_context_t context
);
/*
 * hfs_mountroot — mount HFS Plus as the root filesystem.
 *
 * Visible behavior in this extract: calls hfs_mountfs() on the root
 * device vnode; on failure logs (when HFS_MOUNT_DEBUG) and bails; on
 * success applies default root ownership/permissions (UNKNOWNUID/GID,
 * 0755 masks), establishes the free-block reserve on the VCB, and
 * refreshes the mount's vfsstatfs via hfs_statfs().
 *
 * NOTE(review): this extract is garbled and several original lines are
 * missing (numbering jumps 185→191, 201→203, etc.) — local declarations
 * of `error`/`vcb`, braces and the return statements are not visible.
 * Comments only; code text left byte-identical.
 */
171 * Called by vfs_mountroot when mounting HFS Plus as root.
175 hfs_mountroot(mount_t mp
, vnode_t rvp
, vfs_context_t context
)
177 struct hfsmount
*hfsmp
;
179 struct vfsstatfs
*vfsp
;
/* Mount the root device; 0 = not a journal-replay-only mount. */
182 if ((error
= hfs_mountfs(rvp
, mp
, NULL
, 0, context
))) {
183 if (HFS_MOUNT_DEBUG
) {
184 printf("hfs_mountroot: hfs_mountfs returned %d, rvp (%p) name (%s) \n",
185 error
, rvp
, (rvp
->v_name
? rvp
->v_name
: "unknown device"));
/* NOTE(review): original error-return path truncated in this extract. */
191 hfsmp
= VFSTOHFS(mp
);
/* Root mounts get default "unknown" ownership and 0755 masks. */
193 hfsmp
->hfs_uid
= UNKNOWNUID
;
194 hfsmp
->hfs_gid
= UNKNOWNGID
;
195 hfsmp
->hfs_dir_mask
= (S_IRWXU
| S_IRGRP
|S_IXGRP
| S_IROTH
|S_IXOTH
); /* 0755 */
196 hfsmp
->hfs_file_mask
= (S_IRWXU
| S_IRGRP
|S_IXGRP
| S_IROTH
|S_IXOTH
); /* 0755 */
198 /* Establish the free block reserve. */
199 vcb
= HFSTOVCB(hfsmp
);
200 vcb
->reserveBlocks
= ((u_int64_t
)vcb
->totalBlocks
* HFS_MINFREE
) / 100;
201 vcb
->reserveBlocks
= MIN(vcb
->reserveBlocks
, HFS_MAXRESERVE
/ vcb
->blockSize
);
/* Refresh statfs data for the newly mounted root. */
203 vfsp
= vfs_statfs(mp
);
204 (void)hfs_statfs(mp
, vfsp
, NULL
);
/*
 * hfs_mount — VFS mount entry point.
 *
 * Visible behavior in this extract:
 *  - copyin() the user-supplied hfs_mount_args;
 *  - MNT_UPDATE path: handle MNT_RELOAD (read-only only), downgrade
 *    to read-only (flush files, mark volume cleanly unmounted, fsync
 *    the device, close the journal), or upgrade to read-write
 *    (re-open the journal, erase unused catalog nodes, clear the
 *    clean-unmount bit, re-init hardlink dirs / orphans / hot files /
 *    ACLs, optionally spawn the RB-tree allocator thread), then apply
 *    hfs_changefs() with the new args;
 *  - fresh-mount path: set MNT_DOVOLFS, call hfs_mountfs(), and (when
 *    content protection is enabled) validate/create the root CP EA,
 *    force-unmounting on version mismatch; finally refresh statfs.
 *
 * NOTE(review): this extract is heavily truncated — many original
 *  lines (returns, braces, declarations of retval/cmdflags/flags/err,
 *  #endif markers, journal_open arguments) are missing, shown by gaps
 *  in the embedded numbering. Comments only; code byte-identical.
 */
217 hfs_mount(struct mount
*mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t context
)
219 struct proc
*p
= vfs_context_proc(context
);
220 struct hfsmount
*hfsmp
= NULL
;
221 struct hfs_mount_args args
;
/* Copy mount arguments in from user space. */
225 if ((retval
= copyin(data
, (caddr_t
)&args
, sizeof(args
)))) {
226 if (HFS_MOUNT_DEBUG
) {
227 printf("hfs_mount: copyin returned %d for fs\n", retval
);
231 cmdflags
= (u_int32_t
)vfs_flags(mp
) & MNT_CMDFLAGS
;
232 if (cmdflags
& MNT_UPDATE
) {
233 hfsmp
= VFSTOHFS(mp
);
235 /* Reload incore data after an fsck. */
236 if (cmdflags
& MNT_RELOAD
) {
237 if (vfs_isrdonly(mp
)) {
238 int error
= hfs_reload(mp
);
239 if (error
&& HFS_MOUNT_DEBUG
) {
240 printf("hfs_mount: hfs_reload returned %d on %s \n", error
, hfsmp
->vcbVN
);
/* MNT_RELOAD is only legal on a read-only mount. */
245 if (HFS_MOUNT_DEBUG
) {
246 printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp
->vcbVN
);
252 /* Change to a read-only file system. */
253 if (((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0) &&
257 /* Set flag to indicate that a downgrade to read-only
258 * is in progress and therefore block any further
259 * modifications to the file system.
261 hfs_lock_global (hfsmp
, HFS_EXCLUSIVE_LOCK
);
262 hfsmp
->hfs_flags
|= HFS_RDONLY_DOWNGRADE
;
263 hfsmp
->hfs_downgrading_proc
= current_thread();
264 hfs_unlock_global (hfsmp
);
266 /* use VFS_SYNC to push out System (btree) files */
267 retval
= VFS_SYNC(mp
, MNT_WAIT
, context
);
/* Sync failure aborts the downgrade unless MNT_FORCE was given. */
268 if (retval
&& ((cmdflags
& MNT_FORCE
) == 0)) {
269 hfsmp
->hfs_flags
&= ~HFS_RDONLY_DOWNGRADE
;
270 hfsmp
->hfs_downgrading_proc
= NULL
;
271 if (HFS_MOUNT_DEBUG
) {
272 printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval
, hfsmp
->vcbVN
);
278 if (cmdflags
& MNT_FORCE
)
/* Flush (or forcibly reclaim) all files; failure undoes the downgrade. */
281 if ((retval
= hfs_flushfiles(mp
, flags
, p
))) {
282 hfsmp
->hfs_flags
&= ~HFS_RDONLY_DOWNGRADE
;
283 hfsmp
->hfs_downgrading_proc
= NULL
;
284 if (HFS_MOUNT_DEBUG
) {
285 printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval
, hfsmp
->vcbVN
);
290 /* mark the volume cleanly unmounted */
291 hfsmp
->vcbAtrb
|= kHFSVolumeUnmountedMask
;
292 retval
= hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, 0);
293 hfsmp
->hfs_flags
|= HFS_READ_ONLY
;
295 /* also get the volume bitmap blocks */
297 if (vnode_mount(hfsmp
->hfs_devvp
) == mp
) {
298 retval
= hfs_fsync(hfsmp
->hfs_devvp
, MNT_WAIT
, 0, p
);
/* Device vnode belongs to another mount: fsync it directly. */
300 vnode_get(hfsmp
->hfs_devvp
);
301 retval
= VNOP_FSYNC(hfsmp
->hfs_devvp
, MNT_WAIT
, context
);
302 vnode_put(hfsmp
->hfs_devvp
);
306 if (HFS_MOUNT_DEBUG
) {
307 printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval
, hfsmp
->vcbVN
);
/* fsync failure: roll back the downgrade and become read-write again. */
309 hfsmp
->hfs_flags
&= ~HFS_RDONLY_DOWNGRADE
;
310 hfsmp
->hfs_downgrading_proc
= NULL
;
311 hfsmp
->hfs_flags
&= ~HFS_READ_ONLY
;
/* Close the journal under the global exclusive lock. */
315 hfs_lock_global (hfsmp
, HFS_EXCLUSIVE_LOCK
);
317 journal_close(hfsmp
->jnl
);
320 // Note: we explicitly don't want to shutdown
321 // access to the jvp because we may need
322 // it later if we go back to being read-write.
324 hfs_unlock_global (hfsmp
);
327 #if CONFIG_HFS_ALLOC_RBTREE
328 (void) hfs_teardown_allocator(hfsmp
);
330 hfsmp
->hfs_downgrading_proc
= NULL
;
333 /* Change to a writable file system. */
334 if (vfs_iswriteupgrade(mp
)) {
335 #if CONFIG_HFS_ALLOC_RBTREE
336 thread_t allocator_thread
;
340 * On inconsistent disks, do not allow read-write mount
341 * unless it is the boot volume being mounted.
343 if (!(vfs_flags(mp
) & MNT_ROOTFS
) &&
344 (hfsmp
->vcbAtrb
& kHFSVolumeInconsistentMask
)) {
345 if (HFS_MOUNT_DEBUG
) {
346 printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n", (hfsmp
->vcbVN
));
352 // If the journal was shut-down previously because we were
353 // asked to be read-only, let's start it back up again now
355 if ( (HFSTOVCB(hfsmp
)->vcbAtrb
& kHFSVolumeJournaledMask
)
356 && hfsmp
->jnl
== NULL
357 && hfsmp
->jvp
!= NULL
) {
360 if (hfsmp
->hfs_flags
& HFS_NEED_JNL_RESET
) {
361 jflags
= JOURNAL_RESET
;
366 hfs_lock_global (hfsmp
, HFS_EXCLUSIVE_LOCK
);
/* NOTE(review): several journal_open() arguments are missing from
 * this extract (numbering jumps 369→372→375). */
368 hfsmp
->jnl
= journal_open(hfsmp
->jvp
,
369 (hfsmp
->jnl_start
* HFSTOVCB(hfsmp
)->blockSize
) + (off_t
)HFSTOVCB(hfsmp
)->hfsPlusIOPosOffset
,
372 hfsmp
->hfs_logical_block_size
,
375 hfs_sync_metadata
, hfsmp
->hfs_mp
);
378 * Set up the trim callback function so that we can add
379 * recently freed extents to the free extent cache once
380 * the transaction that freed them is written to the
384 journal_trim_set_callback(hfsmp
->jnl
, hfs_trim_callback
, hfsmp
);
386 hfs_unlock_global (hfsmp
);
388 if (hfsmp
->jnl
== NULL
) {
389 if (HFS_MOUNT_DEBUG
) {
390 printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp
->vcbVN
));
395 hfsmp
->hfs_flags
&= ~HFS_NEED_JNL_RESET
;
400 /* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
401 retval
= hfs_erase_unused_nodes(hfsmp
);
402 if (retval
!= E_NONE
) {
403 if (HFS_MOUNT_DEBUG
) {
404 printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval
, hfsmp
->vcbVN
);
409 /* If this mount point was downgraded from read-write
410 * to read-only, clear that information as we are now
411 * moving back to read-write.
413 hfsmp
->hfs_flags
&= ~HFS_RDONLY_DOWNGRADE
;
414 hfsmp
->hfs_downgrading_proc
= NULL
;
416 /* mark the volume dirty (clear clean unmount bit) */
417 hfsmp
->vcbAtrb
&= ~kHFSVolumeUnmountedMask
;
419 retval
= hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, 0);
420 if (retval
!= E_NONE
) {
421 if (HFS_MOUNT_DEBUG
) {
422 printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval
, hfsmp
->vcbVN
);
427 /* Only clear HFS_READ_ONLY after a successful write */
428 hfsmp
->hfs_flags
&= ~HFS_READ_ONLY
;
/* Post-upgrade housekeeping for writable HFS+ (not HFS standard). */
431 if (!(hfsmp
->hfs_flags
& (HFS_READ_ONLY
| HFS_STANDARD
))) {
432 /* Setup private/hidden directories for hardlinks. */
433 hfs_privatedir_init(hfsmp
, FILE_HARDLINKS
);
434 hfs_privatedir_init(hfsmp
, DIR_HARDLINKS
);
436 hfs_remove_orphans(hfsmp
);
439 * Allow hot file clustering if conditions allow.
441 if ((hfsmp
->hfs_flags
& HFS_METADATA_ZONE
) &&
442 ((hfsmp
->hfs_flags
& HFS_SSD
) == 0)) {
443 (void) hfs_recording_init(hfsmp
);
445 /* Force ACLs on HFS+ file systems. */
446 if (vfs_extendedsecurity(HFSTOVFS(hfsmp
)) == 0) {
447 vfs_setextendedsecurity(HFSTOVFS(hfsmp
));
451 #if CONFIG_HFS_ALLOC_RBTREE
453 * Like the normal mount case, we need to handle creation of the allocation red-black tree
454 * if we're upgrading from read-only to read-write.
456 * We spawn a thread to create the pair of red-black trees for this volume.
457 * However, in so doing, we must be careful to ensure that if this thread is still
458 * running after mount has finished, it doesn't interfere with an unmount. Specifically,
459 * we'll need to set a bit that indicates we're in progress building the trees here.
460 * Unmount will check for this bit, and then if it's set, mark a corresponding bit that
461 * notifies the tree generation code that an unmount is waiting. Also, mark the extent
462 * tree flags that the allocator is enabled for use before we spawn the thread that will start
463 * scanning the RB tree.
465 * Only do this if we're operating on a read-write mount (we wouldn't care for read-only),
466 * which has not previously encountered a bad error on the red-black tree code. Also, don't
467 * try to re-build a tree that already exists.
470 if (hfsmp
->extent_tree_flags
== 0) {
471 hfsmp
->extent_tree_flags
|= (HFS_ALLOC_TREEBUILD_INFLIGHT
| HFS_ALLOC_RB_ENABLED
);
472 /* Initialize EOF counter so that the thread can assume it started at initial values */
473 hfsmp
->offset_block_end
= 0;
477 kernel_thread_start ((thread_continue_t
) hfs_initialize_allocator
, hfsmp
, &allocator_thread
);
478 thread_deallocate(allocator_thread
);
484 /* Update file system parameters. */
485 retval
= hfs_changefs(mp
, &args
);
486 if (retval
&& HFS_MOUNT_DEBUG
) {
487 printf("hfs_mount: hfs_changefs returned %d for %s\n", retval
, hfsmp
->vcbVN
);
490 } else /* not an update request */ {
492 /* Set the mount flag to indicate that we support volfs */
493 vfs_setflags(mp
, (u_int64_t
)((unsigned int)MNT_DOVOLFS
));
495 retval
= hfs_mountfs(devvp
, mp
, &args
, 0, context
);
496 if (retval
&& HFS_MOUNT_DEBUG
) {
497 printf("hfs_mount: hfs_mountfs returned %d\n", retval
);
501 * If above mount call was successful, and this mount is content protection
502 * enabled, then verify the on-disk EA on the root to ensure that the filesystem
503 * is of a suitable vintage to allow the mount to proceed.
505 if ((retval
== 0) && (cp_fs_protected (mp
))) {
507 struct cp_root_xattr xattr
;
508 bzero (&xattr
, sizeof(struct cp_root_xattr
));
509 hfsmp
= vfs_fsprivate(mp
);
511 /* go get the EA to get the version information */
512 err
= cp_getrootxattr (hfsmp
, &xattr
);
513 /* If there was no EA there, then write one out. */
514 if (err
== ENOATTR
) {
515 bzero(&xattr
, sizeof(struct cp_root_xattr
));
516 xattr
.major_version
= CP_CURRENT_MAJOR_VERS
;
517 xattr
.minor_version
= CP_CURRENT_MINOR_VERS
;
520 err
= cp_setrootxattr (hfsmp
, &xattr
);
523 * For any other error, including having an out of date CP version in the
524 * EA, or for an error out of cp_setrootxattr, deny the mount
525 * and do not proceed further.
527 if (err
|| xattr
.major_version
!= CP_CURRENT_MAJOR_VERS
) {
528 /* Deny the mount and tear down. */
530 (void) hfs_unmount (mp
, MNT_FORCE
, context
);
/* Refresh statfs data on the way out. */
537 (void)hfs_statfs(mp
, vfs_statfs(mp
), context
);
/*
 * Context passed to hfs_changefs_callback() via vnode_iterate().
 * NOTE(review): only the hfsmp field is visible in this extract; the
 * namefix/permfix/permswitch fields referenced by the callback and by
 * hfs_changefs() appear on original lines missing here (545-548).
 */
543 struct hfs_changefs_cargs
{
544 struct hfsmount
*hfsmp
;
/*
 * hfs_changefs_callback — per-vnode worker for hfs_changefs(), invoked
 * by vnode_iterate(). Re-reads each cnode's catalog record under the
 * shared catalog lock and, depending on the permswitch/permfix flags,
 * refreshes uid/gid/mode from disk; when the name converter changed it
 * replaces the cnode descriptor and, for the root folder, updates the
 * cached volume name and encoding. Always returns VNODE_RETURNED so
 * iteration continues.
 *
 * NOTE(review): declarations of cp/vcb/lockflags/error and some braces
 * are on original lines missing from this extract. Comments only.
 */
551 hfs_changefs_callback(struct vnode
*vp
, void *cargs
)
555 struct cat_desc cndesc
;
556 struct cat_attr cnattr
;
557 struct hfs_changefs_cargs
*args
;
561 args
= (struct hfs_changefs_cargs
*)cargs
;
564 vcb
= HFSTOVCB(args
->hfsmp
);
/* Look the cnode up again in the catalog under the shared lock. */
566 lockflags
= hfs_systemfile_lock(args
->hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
567 error
= cat_lookup(args
->hfsmp
, &cp
->c_desc
, 0, &cndesc
, &cnattr
, NULL
, NULL
);
568 hfs_systemfile_unlock(args
->hfsmp
, lockflags
);
571 * If we couldn't find this guy skip to the next one
576 return (VNODE_RETURNED
);
579 * Get the real uid/gid and perm mask from disk.
581 if (args
->permswitch
|| args
->permfix
) {
582 cp
->c_uid
= cnattr
.ca_uid
;
583 cp
->c_gid
= cnattr
.ca_gid
;
584 cp
->c_mode
= cnattr
.ca_mode
;
587 * If we're switching name converters then...
588 * Remove the existing entry from the namei cache.
589 * Update name to one based on new encoder.
593 replace_desc(cp
, &cndesc
);
/* Root folder: refresh the cached volume name and its encoding. */
595 if (cndesc
.cd_cnid
== kHFSRootFolderID
) {
596 strlcpy((char *)vcb
->vcbVN
, (const char *)cp
->c_desc
.cd_nameptr
, NAME_MAX
+1);
597 cp
->c_desc
.cd_encoding
= args
->hfsmp
->hfs_encoding
;
600 cat_releasedesc(&cndesc
);
602 return (VNODE_RETURNED
);
605 /* Change fs mount parameters */
/*
 * hfs_changefs — apply updated mount arguments to a live HFS mount.
 *
 * Visible behavior in this extract: sets HFS_IN_CHANGEFS for the
 * duration; computes whether the unknown-permissions mode is being
 * toggled (permswitch); refuses MNT_UNKNOWNPERMISSIONS on the root
 * filesystem; suspends hot-file recording when MNT_NOATIME is set;
 * updates the global timezone and the default uid/gid/masks from the
 * args; for HFS-standard volumes swaps the text-encoding converters
 * (new unicode converter connected first, hfsname converter only after
 * all vnodes were revisited); iterates every vnode through
 * hfs_changefs_callback() when any name/perm change occurred; and
 * finally clears HFS_IN_CHANGEFS.
 *
 * NOTE(review): declarations of vcb/retval, several closing braces,
 * the vfs_setowner() call around original line 708, and various else
 * branches are on lines missing from this extract. Comments only.
 */
607 hfs_changefs(struct mount
*mp
, struct hfs_mount_args
*args
)
610 int namefix
, permfix
, permswitch
;
611 struct hfsmount
*hfsmp
;
613 hfs_to_unicode_func_t get_unicode_func
;
614 unicode_to_hfs_func_t get_hfsname_func
;
615 u_int32_t old_encoding
= 0;
616 struct hfs_changefs_cargs cargs
;
617 u_int32_t mount_flags
;
619 hfsmp
= VFSTOHFS(mp
);
620 vcb
= HFSTOVCB(hfsmp
);
621 mount_flags
= (unsigned int)vfs_flags(mp
);
/* Flag that a changefs is in progress for the duration of this call. */
623 hfsmp
->hfs_flags
|= HFS_IN_CHANGEFS
;
/* True when the unknown-permissions setting is being toggled. */
625 permswitch
= (((hfsmp
->hfs_flags
& HFS_UNKNOWN_PERMS
) &&
626 ((mount_flags
& MNT_UNKNOWNPERMISSIONS
) == 0)) ||
627 (((hfsmp
->hfs_flags
& HFS_UNKNOWN_PERMS
) == 0) &&
628 (mount_flags
& MNT_UNKNOWNPERMISSIONS
)));
630 /* The root filesystem must operate with actual permissions: */
631 if (permswitch
&& (mount_flags
& MNT_ROOTFS
) && (mount_flags
& MNT_UNKNOWNPERMISSIONS
)) {
632 vfs_clearflags(mp
, (u_int64_t
)((unsigned int)MNT_UNKNOWNPERMISSIONS
)); /* Just say "No". */
636 if (mount_flags
& MNT_UNKNOWNPERMISSIONS
)
637 hfsmp
->hfs_flags
|= HFS_UNKNOWN_PERMS
;
639 hfsmp
->hfs_flags
&= ~HFS_UNKNOWN_PERMS
;
641 namefix
= permfix
= 0;
644 * Tracking of hot files requires up-to-date access times. So if
645 * access time updates are disabled, we must also disable hot files.
647 if (mount_flags
& MNT_NOATIME
) {
648 (void) hfs_recording_suspend(hfsmp
);
651 /* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
652 if (args
->hfs_timezone
.tz_minuteswest
!= VNOVAL
) {
653 gTimeZone
= args
->hfs_timezone
;
656 /* Change the default uid, gid and/or mask */
657 if ((args
->hfs_uid
!= (uid_t
)VNOVAL
) && (hfsmp
->hfs_uid
!= args
->hfs_uid
)) {
658 hfsmp
->hfs_uid
= args
->hfs_uid
;
659 if (vcb
->vcbSigWord
== kHFSPlusSigWord
)
662 if ((args
->hfs_gid
!= (gid_t
)VNOVAL
) && (hfsmp
->hfs_gid
!= args
->hfs_gid
)) {
663 hfsmp
->hfs_gid
= args
->hfs_gid
;
664 if (vcb
->vcbSigWord
== kHFSPlusSigWord
)
667 if (args
->hfs_mask
!= (mode_t
)VNOVAL
) {
668 if (hfsmp
->hfs_dir_mask
!= (args
->hfs_mask
& ALLPERMS
)) {
669 hfsmp
->hfs_dir_mask
= args
->hfs_mask
& ALLPERMS
;
670 hfsmp
->hfs_file_mask
= args
->hfs_mask
& ALLPERMS
;
671 if ((args
->flags
!= VNOVAL
) && (args
->flags
& HFSFSMNT_NOXONFILES
))
672 hfsmp
->hfs_file_mask
= (args
->hfs_mask
& DEFFILEMODE
);
673 if (vcb
->vcbSigWord
== kHFSPlusSigWord
)
678 /* Change the hfs encoding value (hfs only) */
679 if ((vcb
->vcbSigWord
== kHFSSigWord
) &&
680 (args
->hfs_encoding
!= (u_int32_t
)VNOVAL
) &&
681 (hfsmp
->hfs_encoding
!= args
->hfs_encoding
)) {
683 retval
= hfs_getconverter(args
->hfs_encoding
, &get_unicode_func
, &get_hfsname_func
);
688 * Connect the new hfs_get_unicode converter but leave
689 * the old hfs_get_hfsname converter in place so that
690 * we can lookup existing vnodes to get their correctly
693 * When we're all finished, we can then connect the new
694 * hfs_get_hfsname converter and release our interest
695 * in the old converters.
697 hfsmp
->hfs_get_unicode
= get_unicode_func
;
698 old_encoding
= hfsmp
->hfs_encoding
;
699 hfsmp
->hfs_encoding
= args
->hfs_encoding
;
/* Nothing changed that requires touching every vnode: done. */
703 if (!(namefix
|| permfix
|| permswitch
))
706 /* XXX 3762912 hack to support HFS filesystem 'owner' */
/* NOTE(review): the call these arguments belong to (original line
 * ~708, presumably vfs_setowner — TODO confirm) is missing here. */
709 hfsmp
->hfs_uid
== UNKNOWNUID
? KAUTH_UID_NONE
: hfsmp
->hfs_uid
,
710 hfsmp
->hfs_gid
== UNKNOWNGID
? KAUTH_GID_NONE
: hfsmp
->hfs_gid
);
713 * For each active vnode fix things that changed
715 * Note that we can visit a vnode more than once
716 * and we can race with fsync.
718 * hfs_changefs_callback will be called for each vnode
719 * hung off of this mount point
721 * The vnode will be properly referenced and unreferenced
722 * around the callback
725 cargs
.namefix
= namefix
;
726 cargs
.permfix
= permfix
;
727 cargs
.permswitch
= permswitch
;
729 vnode_iterate(mp
, 0, hfs_changefs_callback
, (void *)&cargs
);
732 * If we're switching name converters we can now
733 * connect the new hfs_get_hfsname converter and
734 * release our interest in the old converters.
737 hfsmp
->hfs_get_hfsname
= get_hfsname_func
;
738 vcb
->volumeNameEncodingHint
= args
->hfs_encoding
;
739 (void) hfs_relconverter(old_encoding
);
742 hfsmp
->hfs_flags
&= ~HFS_IN_CHANGEFS
;
/*
 * Context passed to hfs_reload_callback() via vnode_iterate().
 * NOTE(review): only the hfsmp field is visible in this extract; the
 * `error` field written by the callback appears on a missing original
 * line (~749).
 */
747 struct hfs_reload_cargs
{
748 struct hfsmount
*hfsmp
;
/*
 * hfs_reload_callback — per-vnode worker for hfs_reload(), invoked by
 * vnode_iterate(). Invalidates the vnode's buffers, drops directory
 * hints, and for regular user files re-reads the cnode's catalog data
 * (by file ID, since the name may have changed after fsck) under the
 * shared catalog lock, replacing the cached descriptor. A lookup error
 * is stored in args->error and aborts the iteration with
 * VNODE_RETURNED_DONE; otherwise VNODE_RETURNED continues it.
 *
 * NOTE(review): declarations of cp/lockflags and some braces are on
 * original lines missing from this extract. Comments only.
 */
753 hfs_reload_callback(struct vnode
*vp
, void *cargs
)
756 struct hfs_reload_cargs
*args
;
759 args
= (struct hfs_reload_cargs
*)cargs
;
761 * flush all the buffers associated with this node
763 (void) buf_invalidateblks(vp
, 0, 0, 0);
767 * Remove any directory hints
770 hfs_reldirhints(cp
, 0);
773 * Re-read cnode data for all active vnodes (non-metadata files).
775 if (!vnode_issystem(vp
) && !VNODE_IS_RSRC(vp
) && (cp
->c_fileid
>= kHFSFirstUserCatalogNodeID
)) {
776 struct cat_fork
*datafork
;
777 struct cat_desc desc
;
779 datafork
= cp
->c_datafork
? &cp
->c_datafork
->ff_data
: NULL
;
781 /* lookup by fileID since name could have changed */
782 lockflags
= hfs_systemfile_lock(args
->hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
783 args
->error
= cat_idlookup(args
->hfsmp
, cp
->c_fileid
, 0, &desc
, &cp
->c_attr
, datafork
);
784 hfs_systemfile_unlock(args
->hfsmp
, lockflags
);
/* Lookup failed: stop the whole iteration. */
786 return (VNODE_RETURNED_DONE
);
789 /* update cnode's catalog descriptor */
790 (void) replace_desc(cp
, &desc
);
792 return (VNODE_RETURNED
);
/*
 * hfs_reload — re-read all in-core metadata after an fsck of a
 * read-only-mounted volume (see the original block comment below).
 *
 * Visible behavior in this extract: rejects plain-HFS volumes
 * (EINVAL); invalidates the device's cached metadata (panics if dirty
 * buffers remain); recycles/reloads every vnode via
 * hfs_reload_callback(); re-reads and sanity-checks the volume header
 * from disk; copies the header fields back into the in-core VCB;
 * reloads the extent records, sizes and clump sizes of the extents,
 * catalog, attributes (if present) and allocation meta-files; reloads
 * the three B-tree headers; re-reads the volume name; re-establishes
 * the private hardlink directories; and fires volume notifications.
 *
 * NOTE(review): declarations of vcb/bp/error/i, buf_brelse calls,
 * error-exit statements and many braces sit on original lines missing
 * from this extract (numbering gaps e.g. 858→865, 941→947).
 * Comments only; code byte-identical.
 */
796 * Reload all incore data for a filesystem (used after running fsck on
797 * the root filesystem and finding things to fix). The filesystem must
798 * be mounted read-only.
800 * Things to do to update the mount:
801 * invalidate all cached meta-data.
802 * invalidate all inactive vnodes.
803 * invalidate all cached file data.
804 * re-read volume header from disk.
805 * re-load meta-file info (extents, file size).
806 * re-load B-tree header data.
807 * re-read cnode data for all active vnodes.
810 hfs_reload(struct mount
*mountp
)
812 register struct vnode
*devvp
;
815 struct hfsmount
*hfsmp
;
816 struct HFSPlusVolumeHeader
*vhp
;
818 struct filefork
*forkp
;
819 struct cat_desc cndesc
;
820 struct hfs_reload_cargs args
;
821 daddr64_t priIDSector
;
823 hfsmp
= VFSTOHFS(mountp
);
824 vcb
= HFSTOVCB(hfsmp
);
826 if (vcb
->vcbSigWord
== kHFSSigWord
)
827 return (EINVAL
); /* rooting from HFS is not supported! */
830 * Invalidate all cached meta-data.
832 devvp
= hfsmp
->hfs_devvp
;
833 if (buf_invalidateblks(devvp
, 0, 0, 0))
834 panic("hfs_reload: dirty1");
839 * hfs_reload_callback will be called for each vnode
840 * hung off of this mount point that can't be recycled...
841 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
842 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
843 * properly referenced and unreferenced around the callback
845 vnode_iterate(mountp
, VNODE_RELOAD
| VNODE_WAIT
, hfs_reload_callback
, (void *)&args
);
851 * Re-read VolumeHeader from disk.
853 priIDSector
= (daddr64_t
)((vcb
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
854 HFS_PRI_SECTOR(hfsmp
->hfs_logical_block_size
));
856 error
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
857 HFS_PHYSBLK_ROUNDDOWN(priIDSector
, hfsmp
->hfs_log_per_phys
),
858 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp
);
865 vhp
= (HFSPlusVolumeHeader
*) (buf_dataptr(bp
) + HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
));
867 /* Do a quick sanity check */
868 if ((SWAP_BE16(vhp
->signature
) != kHFSPlusSigWord
&&
869 SWAP_BE16(vhp
->signature
) != kHFSXSigWord
) ||
870 (SWAP_BE16(vhp
->version
) != kHFSPlusVersion
&&
871 SWAP_BE16(vhp
->version
) != kHFSXVersion
) ||
872 SWAP_BE32(vhp
->blockSize
) != vcb
->blockSize
) {
/* Copy on-disk (big-endian) header fields into the in-core VCB. */
877 vcb
->vcbLsMod
= to_bsd_time(SWAP_BE32(vhp
->modifyDate
));
878 vcb
->vcbAtrb
= SWAP_BE32 (vhp
->attributes
);
879 vcb
->vcbJinfoBlock
= SWAP_BE32(vhp
->journalInfoBlock
);
880 vcb
->vcbClpSiz
= SWAP_BE32 (vhp
->rsrcClumpSize
);
881 vcb
->vcbNxtCNID
= SWAP_BE32 (vhp
->nextCatalogID
);
882 vcb
->vcbVolBkUp
= to_bsd_time(SWAP_BE32(vhp
->backupDate
));
883 vcb
->vcbWrCnt
= SWAP_BE32 (vhp
->writeCount
);
884 vcb
->vcbFilCnt
= SWAP_BE32 (vhp
->fileCount
);
885 vcb
->vcbDirCnt
= SWAP_BE32 (vhp
->folderCount
);
886 HFS_UPDATE_NEXT_ALLOCATION(vcb
, SWAP_BE32 (vhp
->nextAllocation
));
887 vcb
->totalBlocks
= SWAP_BE32 (vhp
->totalBlocks
);
888 vcb
->freeBlocks
= SWAP_BE32 (vhp
->freeBlocks
);
889 vcb
->encodingsBitmap
= SWAP_BE64 (vhp
->encodingsBitmap
);
890 bcopy(vhp
->finderInfo
, vcb
->vcbFndrInfo
, sizeof(vhp
->finderInfo
));
891 vcb
->localCreateDate
= SWAP_BE32 (vhp
->createDate
); /* hfs+ create date is in local time */
894 * Re-load meta-file vnode data (extent info, file size, etc).
896 forkp
= VTOF((struct vnode
*)vcb
->extentsRefNum
);
897 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
898 forkp
->ff_extents
[i
].startBlock
=
899 SWAP_BE32 (vhp
->extentsFile
.extents
[i
].startBlock
);
900 forkp
->ff_extents
[i
].blockCount
=
901 SWAP_BE32 (vhp
->extentsFile
.extents
[i
].blockCount
);
903 forkp
->ff_size
= SWAP_BE64 (vhp
->extentsFile
.logicalSize
);
904 forkp
->ff_blocks
= SWAP_BE32 (vhp
->extentsFile
.totalBlocks
);
905 forkp
->ff_clumpsize
= SWAP_BE32 (vhp
->extentsFile
.clumpSize
);
/* Catalog file fork. */
908 forkp
= VTOF((struct vnode
*)vcb
->catalogRefNum
);
909 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
910 forkp
->ff_extents
[i
].startBlock
=
911 SWAP_BE32 (vhp
->catalogFile
.extents
[i
].startBlock
);
912 forkp
->ff_extents
[i
].blockCount
=
913 SWAP_BE32 (vhp
->catalogFile
.extents
[i
].blockCount
);
915 forkp
->ff_size
= SWAP_BE64 (vhp
->catalogFile
.logicalSize
);
916 forkp
->ff_blocks
= SWAP_BE32 (vhp
->catalogFile
.totalBlocks
);
917 forkp
->ff_clumpsize
= SWAP_BE32 (vhp
->catalogFile
.clumpSize
);
/* Attributes file fork (only when the volume has one). */
919 if (hfsmp
->hfs_attribute_vp
) {
920 forkp
= VTOF(hfsmp
->hfs_attribute_vp
);
921 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
922 forkp
->ff_extents
[i
].startBlock
=
923 SWAP_BE32 (vhp
->attributesFile
.extents
[i
].startBlock
);
924 forkp
->ff_extents
[i
].blockCount
=
925 SWAP_BE32 (vhp
->attributesFile
.extents
[i
].blockCount
);
927 forkp
->ff_size
= SWAP_BE64 (vhp
->attributesFile
.logicalSize
);
928 forkp
->ff_blocks
= SWAP_BE32 (vhp
->attributesFile
.totalBlocks
);
929 forkp
->ff_clumpsize
= SWAP_BE32 (vhp
->attributesFile
.clumpSize
);
/* Allocation (bitmap) file fork. */
932 forkp
= VTOF((struct vnode
*)vcb
->allocationsRefNum
);
933 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
934 forkp
->ff_extents
[i
].startBlock
=
935 SWAP_BE32 (vhp
->allocationFile
.extents
[i
].startBlock
);
936 forkp
->ff_extents
[i
].blockCount
=
937 SWAP_BE32 (vhp
->allocationFile
.extents
[i
].blockCount
);
939 forkp
->ff_size
= SWAP_BE64 (vhp
->allocationFile
.logicalSize
);
940 forkp
->ff_blocks
= SWAP_BE32 (vhp
->allocationFile
.totalBlocks
);
941 forkp
->ff_clumpsize
= SWAP_BE32 (vhp
->allocationFile
.clumpSize
);
947 * Re-load B-tree header data
949 forkp
= VTOF((struct vnode
*)vcb
->extentsRefNum
);
950 if ( (error
= MacToVFSError( BTReloadData((FCB
*)forkp
) )) )
953 forkp
= VTOF((struct vnode
*)vcb
->catalogRefNum
);
954 if ( (error
= MacToVFSError( BTReloadData((FCB
*)forkp
) )) )
957 if (hfsmp
->hfs_attribute_vp
) {
958 forkp
= VTOF(hfsmp
->hfs_attribute_vp
);
959 if ( (error
= MacToVFSError( BTReloadData((FCB
*)forkp
) )) )
963 /* Reload the volume name */
964 if ((error
= cat_idlookup(hfsmp
, kHFSRootFolderID
, 0, &cndesc
, NULL
, NULL
)))
966 vcb
->volumeNameEncodingHint
= cndesc
.cd_encoding
;
967 bcopy(cndesc
.cd_nameptr
, vcb
->vcbVN
, min(255, cndesc
.cd_namelen
));
968 cat_releasedesc(&cndesc
);
970 /* Re-establish private/hidden directories. */
971 hfs_privatedir_init(hfsmp
, FILE_HARDLINKS
);
972 hfs_privatedir_init(hfsmp
, DIR_HARDLINKS
);
974 /* In case any volume information changed to trigger a notification */
975 hfs_generate_volume_notifications(hfsmp
);
983 hfs_syncer(void *arg0
, void *unused
)
985 #pragma unused(unused)
987 struct hfsmount
*hfsmp
= arg0
;
990 uint32_t delay
= HFS_META_DELAY
;
994 clock_get_calendar_microtime(&secs
, &usecs
);
995 now
= ((uint64_t)secs
* 1000000ULL) + (uint64_t)usecs
;
998 // If the amount of pending writes is more than our limit, wait
999 // for 2/3 of it to drain and then flush the journal.
1001 if (hfsmp
->hfs_mp
->mnt_pending_write_size
> hfsmp
->hfs_max_pending_io
) {
1003 uint64_t pending_io
, start
, rate
= 0;
1007 hfs_start_transaction(hfsmp
); // so we hold off any new i/o's
1009 pending_io
= hfsmp
->hfs_mp
->mnt_pending_write_size
;
1011 clock_get_calendar_microtime(&secs
, &usecs
);
1012 start
= ((uint64_t)secs
* 1000000ULL) + (uint64_t)usecs
;
1014 while(hfsmp
->hfs_mp
->mnt_pending_write_size
> (pending_io
/3) && counter
++ < 500) {
1015 tsleep((caddr_t
)hfsmp
, PRIBIO
, "hfs-wait-for-io-to-drain", 10);
1018 if (counter
>= 500) {
1019 printf("hfs: timed out waiting for io to drain (%lld)\n", (int64_t)hfsmp
->hfs_mp
->mnt_pending_write_size
);
1023 journal_flush(hfsmp
->jnl
, FALSE
);
1025 hfs_sync(hfsmp
->hfs_mp
, MNT_WAIT
, vfs_context_kernel());
1028 clock_get_calendar_microtime(&secs
, &usecs
);
1029 now
= ((uint64_t)secs
* 1000000ULL) + (uint64_t)usecs
;
1030 hfsmp
->hfs_last_sync_time
= now
;
1032 rate
= ((pending_io
* 1000000ULL) / (now
- start
)); // yields bytes per second
1035 hfs_end_transaction(hfsmp
);
1038 // If a reasonable amount of time elapsed then check the
1039 // i/o rate. If it's taking less than 1 second or more
1040 // than 2 seconds, adjust hfs_max_pending_io so that we
1041 // will allow about 1.5 seconds of i/o to queue up.
1043 if (((now
- start
) >= 300000) && (rate
!= 0)) {
1044 uint64_t scale
= (pending_io
* 100) / rate
;
1046 if (scale
< 100 || scale
> 200) {
1047 // set it so that it should take about 1.5 seconds to drain
1048 hfsmp
->hfs_max_pending_io
= (rate
* 150ULL) / 100ULL;
1052 } else if ( ((now
- hfsmp
->hfs_last_sync_time
) >= 5000000ULL)
1053 || (((now
- hfsmp
->hfs_last_sync_time
) >= 100000LL)
1054 && ((now
- hfsmp
->hfs_last_sync_request_time
) >= 100000LL)
1055 && (hfsmp
->hfs_active_threads
== 0)
1056 && (hfsmp
->hfs_global_lock_nesting
== 0))) {
1059 // Flush the journal if more than 5 seconds elapsed since
1060 // the last sync OR we have not sync'ed recently and the
1061 // last sync request time was more than 100 milliseconds
1062 // ago and no one is in the middle of a transaction right
1063 // now. Else we defer the sync and reschedule it.
1066 hfs_lock_global (hfsmp
, HFS_SHARED_LOCK
);
1068 journal_flush(hfsmp
->jnl
, FALSE
);
1070 hfs_unlock_global (hfsmp
);
1072 hfs_sync(hfsmp
->hfs_mp
, MNT_WAIT
, vfs_context_kernel());
1075 clock_get_calendar_microtime(&secs
, &usecs
);
1076 now
= ((uint64_t)secs
* 1000000ULL) + (uint64_t)usecs
;
1077 hfsmp
->hfs_last_sync_time
= now
;
1079 } else if (hfsmp
->hfs_active_threads
== 0) {
1082 clock_interval_to_deadline(delay
, HFS_MILLISEC_SCALE
, &deadline
);
1083 thread_call_enter_delayed(hfsmp
->hfs_syncer
, deadline
);
1085 // note: we intentionally return early here and do not
1086 // decrement the sync_scheduled and sync_incomplete
1087 // variables because we rescheduled the timer.
1093 // NOTE: we decrement these *after* we're done the journal_flush() since
1094 // it can take a significant amount of time and so we don't want more
1095 // callbacks scheduled until we're done this one.
1097 OSDecrementAtomic((volatile SInt32
*)&hfsmp
->hfs_sync_scheduled
);
1098 OSDecrementAtomic((volatile SInt32
*)&hfsmp
->hfs_sync_incomplete
);
1099 wakeup((caddr_t
)&hfsmp
->hfs_sync_incomplete
);
1103 extern int IOBSDIsMediaEjectable( const char *cdev_name
);
1106 * Initialization code for Red-Black Tree Allocator
1108 * This function will build the two red-black trees necessary for allocating space
1109 * from the metadata zone as well as normal allocations. Currently, we use
1110 * an advisory read to get most of the data into the buffer cache.
1111 * This function is intended to be run in a separate thread so as not to slow down mount.
1116 hfs_initialize_allocator (struct hfsmount
*hfsmp
) {
1118 #if CONFIG_HFS_ALLOC_RBTREE
1122 * Take the allocation file lock. Journal transactions will block until
1125 int flags
= hfs_systemfile_lock(hfsmp
, SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
1128 * GenerateTree assumes that the bitmap lock is held when you call the function.
1129 * It will drop and re-acquire the lock periodically as needed to let other allocations
1130 * through. It returns with the bitmap lock held. Since we only maintain one tree,
1131 * we don't need to specify a start block (always starts at 0).
1133 err
= GenerateTree(hfsmp
, hfsmp
->totalBlocks
, &flags
, 1);
1137 /* Mark offset tree as built */
1138 hfsmp
->extent_tree_flags
|= HFS_ALLOC_RB_ACTIVE
;
1142 * GenerateTree may drop the bitmap lock during operation in order to give other
1143 * threads a chance to allocate blocks, but it will always return with the lock held, so
1144 * we don't need to re-grab the lock in order to update the TREEBUILD_INFLIGHT bit.
1146 hfsmp
->extent_tree_flags
&= ~HFS_ALLOC_TREEBUILD_INFLIGHT
;
1148 /* Wakeup any waiters on the allocation bitmap lock */
1149 wakeup((caddr_t
)&hfsmp
->extent_tree_flags
);
1152 hfs_systemfile_unlock(hfsmp
, flags
);
1154 #pragma unused (hfsmp)
1160 * Teardown code for the Red-Black Tree allocator.
1161 * This function consolidates the code which serializes with respect
1162 * to a thread that may be potentially still building the tree when we need to begin
1163 * tearing it down. Since the red-black tree may not be live when we enter this function
1165 * 1 -> Tree was live.
1166 * 0 -> Tree was not active at time of call.
1170 hfs_teardown_allocator (struct hfsmount
*hfsmp
) {
1173 #if CONFIG_HFS_ALLOC_RBTREE
1178 * Check to see if the tree-generation is still on-going.
1179 * If it is, then block until it's done.
1182 flags
= hfs_systemfile_lock(hfsmp
, SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
1185 while (hfsmp
->extent_tree_flags
& HFS_ALLOC_TREEBUILD_INFLIGHT
) {
1186 hfsmp
->extent_tree_flags
|= HFS_ALLOC_TEARDOWN_INFLIGHT
;
1188 lck_rw_sleep(&(VTOC(hfsmp
->hfs_allocation_vp
))->c_rwlock
, LCK_SLEEP_EXCLUSIVE
,
1189 &hfsmp
->extent_tree_flags
, THREAD_UNINT
);
1192 if (hfs_isrbtree_active (hfsmp
)) {
1195 /* Tear down the RB Trees while we have the bitmap locked */
1196 DestroyTrees(hfsmp
);
1200 hfs_systemfile_unlock(hfsmp
, flags
);
1202 #pragma unused (hfsmp)
1209 static int hfs_root_unmounted_cleanly
= 0;
1211 SYSCTL_DECL(_vfs_generic
);
1212 SYSCTL_INT(_vfs_generic
, OID_AUTO
, root_unmounted_cleanly
, CTLFLAG_RD
, &hfs_root_unmounted_cleanly
, 0, "Root filesystem was unmounted cleanly");
1215 * Common code for mount and mountroot
1218 hfs_mountfs(struct vnode
*devvp
, struct mount
*mp
, struct hfs_mount_args
*args
,
1219 int journal_replay_only
, vfs_context_t context
)
1221 struct proc
*p
= vfs_context_proc(context
);
1222 int retval
= E_NONE
;
1223 struct hfsmount
*hfsmp
= NULL
;
1226 HFSMasterDirectoryBlock
*mdbp
= NULL
;
1234 daddr64_t log_blkcnt
;
1235 u_int32_t log_blksize
;
1236 u_int32_t phys_blksize
;
1237 u_int32_t minblksize
;
1238 u_int32_t iswritable
;
1239 daddr64_t mdb_offset
;
1243 #if CONFIG_HFS_ALLOC_RBTREE
1244 thread_t allocator_thread
;
1248 /* only hfs_mountroot passes us NULL as the 'args' argument */
1252 ronly
= vfs_isrdonly(mp
);
1253 dev
= vnode_specrdev(devvp
);
1254 cred
= p
? vfs_context_ucred(context
) : NOCRED
;
1260 minblksize
= kHFSBlockSize
;
1262 /* Advisory locking should be handled at the VFS layer */
1263 vfs_setlocklocal(mp
);
1265 /* Get the logical block size (treated as physical block size everywhere) */
1266 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKSIZE
, (caddr_t
)&log_blksize
, 0, context
)) {
1267 if (HFS_MOUNT_DEBUG
) {
1268 printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n");
1273 if (log_blksize
== 0 || log_blksize
> 1024*1024*1024) {
1274 printf("hfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize
);
1279 /* Get the physical block size. */
1280 retval
= VNOP_IOCTL(devvp
, DKIOCGETPHYSICALBLOCKSIZE
, (caddr_t
)&phys_blksize
, 0, context
);
1282 if ((retval
!= ENOTSUP
) && (retval
!= ENOTTY
)) {
1283 if (HFS_MOUNT_DEBUG
) {
1284 printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n");
1289 /* If device does not support this ioctl, assume that physical
1290 * block size is same as logical block size
1292 phys_blksize
= log_blksize
;
1294 if (phys_blksize
== 0 || phys_blksize
> 1024*1024*1024) {
1295 printf("hfs: physical block size 0x%x looks bad. Not mounting.\n", phys_blksize
);
1300 /* Switch to 512 byte sectors (temporarily) */
1301 if (log_blksize
> 512) {
1302 u_int32_t size512
= 512;
1304 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
, (caddr_t
)&size512
, FWRITE
, context
)) {
1305 if (HFS_MOUNT_DEBUG
) {
1306 printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n");
1312 /* Get the number of 512 byte physical blocks. */
1313 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
, (caddr_t
)&log_blkcnt
, 0, context
)) {
1314 /* resetting block size may fail if getting block count did */
1315 (void)VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
, (caddr_t
)&log_blksize
, FWRITE
, context
);
1316 if (HFS_MOUNT_DEBUG
) {
1317 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n");
1322 /* Compute an accurate disk size (i.e. within 512 bytes) */
1323 disksize
= (u_int64_t
)log_blkcnt
* (u_int64_t
)512;
1326 * On Tiger it is not necessary to switch the device
1327 * block size to be 4k if there are more than 31-bits
1328 * worth of blocks but to insure compatibility with
1329 * pre-Tiger systems we have to do it.
1331 * If the device size is not a multiple of 4K (8 * 512), then
1332 * switching the logical block size isn't going to help because
1333 * we will be unable to write the alternate volume header.
1334 * In this case, just leave the logical block size unchanged.
1336 if (log_blkcnt
> 0x000000007fffffff && (log_blkcnt
& 7) == 0) {
1337 minblksize
= log_blksize
= 4096;
1338 if (phys_blksize
< log_blksize
)
1339 phys_blksize
= log_blksize
;
1343 * The cluster layer is not currently prepared to deal with a logical
1344 * block size larger than the system's page size. (It can handle
1345 * blocks per page, but not multiple pages per block.) So limit the
1346 * logical block size to the page size.
1348 if (log_blksize
> PAGE_SIZE
)
1349 log_blksize
= PAGE_SIZE
;
1351 /* Now switch to our preferred physical block size. */
1352 if (log_blksize
> 512) {
1353 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
, (caddr_t
)&log_blksize
, FWRITE
, context
)) {
1354 if (HFS_MOUNT_DEBUG
) {
1355 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n");
1360 /* Get the count of physical blocks. */
1361 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
, (caddr_t
)&log_blkcnt
, 0, context
)) {
1362 if (HFS_MOUNT_DEBUG
) {
1363 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n");
1371 * minblksize is the minimum physical block size
1372 * log_blksize has our preferred physical block size
1373 * log_blkcnt has the total number of physical blocks
1376 mdb_offset
= (daddr64_t
)HFS_PRI_SECTOR(log_blksize
);
1377 if ((retval
= (int)buf_meta_bread(devvp
,
1378 HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, (phys_blksize
/log_blksize
)),
1379 phys_blksize
, cred
, &bp
))) {
1380 if (HFS_MOUNT_DEBUG
) {
1381 printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval
);
1385 MALLOC(mdbp
, HFSMasterDirectoryBlock
*, kMDBSize
, M_TEMP
, M_WAITOK
);
1388 if (HFS_MOUNT_DEBUG
) {
1389 printf("hfs_mountfs: MALLOC failed\n");
1393 bcopy((char *)buf_dataptr(bp
) + HFS_PRI_OFFSET(phys_blksize
), mdbp
, kMDBSize
);
1397 MALLOC(hfsmp
, struct hfsmount
*, sizeof(struct hfsmount
), M_HFSMNT
, M_WAITOK
);
1398 if (hfsmp
== NULL
) {
1399 if (HFS_MOUNT_DEBUG
) {
1400 printf("hfs_mountfs: MALLOC (2) failed\n");
1405 bzero(hfsmp
, sizeof(struct hfsmount
));
1407 hfs_chashinit_finish(hfsmp
);
1410 * See if the disk is a solid state device. We need this to decide what to do about
1413 if (VNOP_IOCTL(devvp
, DKIOCISSOLIDSTATE
, (caddr_t
)&isssd
, 0, context
) == 0) {
1415 hfsmp
->hfs_flags
|= HFS_SSD
;
1421 * Init the volume information structure
1424 lck_mtx_init(&hfsmp
->hfs_mutex
, hfs_mutex_group
, hfs_lock_attr
);
1425 lck_mtx_init(&hfsmp
->hfc_mutex
, hfs_mutex_group
, hfs_lock_attr
);
1426 lck_rw_init(&hfsmp
->hfs_global_lock
, hfs_rwlock_group
, hfs_lock_attr
);
1427 lck_rw_init(&hfsmp
->hfs_insync
, hfs_rwlock_group
, hfs_lock_attr
);
1428 lck_spin_init(&hfsmp
->vcbFreeExtLock
, hfs_spinlock_group
, hfs_lock_attr
);
1430 vfs_setfsprivate(mp
, hfsmp
);
1431 hfsmp
->hfs_mp
= mp
; /* Make VFSTOHFS work */
1432 hfsmp
->hfs_raw_dev
= vnode_specrdev(devvp
);
1433 hfsmp
->hfs_devvp
= devvp
;
1434 vnode_ref(devvp
); /* Hold a ref on the device, dropped when hfsmp is freed. */
1435 hfsmp
->hfs_logical_block_size
= log_blksize
;
1436 hfsmp
->hfs_logical_block_count
= log_blkcnt
;
1437 hfsmp
->hfs_physical_block_size
= phys_blksize
;
1438 hfsmp
->hfs_log_per_phys
= (phys_blksize
/ log_blksize
);
1439 hfsmp
->hfs_flags
|= HFS_WRITEABLE_MEDIA
;
1441 hfsmp
->hfs_flags
|= HFS_READ_ONLY
;
1442 if (((unsigned int)vfs_flags(mp
)) & MNT_UNKNOWNPERMISSIONS
)
1443 hfsmp
->hfs_flags
|= HFS_UNKNOWN_PERMS
;
1446 for (i
= 0; i
< MAXQUOTAS
; i
++)
1447 dqfileinit(&hfsmp
->hfs_qfiles
[i
]);
1451 hfsmp
->hfs_uid
= (args
->hfs_uid
== (uid_t
)VNOVAL
) ? UNKNOWNUID
: args
->hfs_uid
;
1452 if (hfsmp
->hfs_uid
== 0xfffffffd) hfsmp
->hfs_uid
= UNKNOWNUID
;
1453 hfsmp
->hfs_gid
= (args
->hfs_gid
== (gid_t
)VNOVAL
) ? UNKNOWNGID
: args
->hfs_gid
;
1454 if (hfsmp
->hfs_gid
== 0xfffffffd) hfsmp
->hfs_gid
= UNKNOWNGID
;
1455 vfs_setowner(mp
, hfsmp
->hfs_uid
, hfsmp
->hfs_gid
); /* tell the VFS */
1456 if (args
->hfs_mask
!= (mode_t
)VNOVAL
) {
1457 hfsmp
->hfs_dir_mask
= args
->hfs_mask
& ALLPERMS
;
1458 if (args
->flags
& HFSFSMNT_NOXONFILES
) {
1459 hfsmp
->hfs_file_mask
= (args
->hfs_mask
& DEFFILEMODE
);
1461 hfsmp
->hfs_file_mask
= args
->hfs_mask
& ALLPERMS
;
1464 hfsmp
->hfs_dir_mask
= UNKNOWNPERMISSIONS
& ALLPERMS
; /* 0777: rwx---rwx */
1465 hfsmp
->hfs_file_mask
= UNKNOWNPERMISSIONS
& DEFFILEMODE
; /* 0666: no --x by default? */
1467 if ((args
->flags
!= (int)VNOVAL
) && (args
->flags
& HFSFSMNT_WRAPPER
))
1470 /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1471 if (((unsigned int)vfs_flags(mp
)) & MNT_UNKNOWNPERMISSIONS
) {
1472 hfsmp
->hfs_uid
= UNKNOWNUID
;
1473 hfsmp
->hfs_gid
= UNKNOWNGID
;
1474 vfs_setowner(mp
, hfsmp
->hfs_uid
, hfsmp
->hfs_gid
); /* tell the VFS */
1475 hfsmp
->hfs_dir_mask
= UNKNOWNPERMISSIONS
& ALLPERMS
; /* 0777: rwx---rwx */
1476 hfsmp
->hfs_file_mask
= UNKNOWNPERMISSIONS
& DEFFILEMODE
; /* 0666: no --x by default? */
1480 /* Find out if disk media is writable. */
1481 if (VNOP_IOCTL(devvp
, DKIOCISWRITABLE
, (caddr_t
)&iswritable
, 0, context
) == 0) {
1483 hfsmp
->hfs_flags
|= HFS_WRITEABLE_MEDIA
;
1485 hfsmp
->hfs_flags
&= ~HFS_WRITEABLE_MEDIA
;
1488 // record the current time at which we're mounting this volume
1491 hfsmp
->hfs_mount_time
= tv
.tv_sec
;
1493 /* Mount a standard HFS disk */
1494 if ((SWAP_BE16(mdbp
->drSigWord
) == kHFSSigWord
) &&
1495 (mntwrapper
|| (SWAP_BE16(mdbp
->drEmbedSigWord
) != kHFSPlusSigWord
))) {
1497 /* On 10.6 and beyond, non read-only mounts for HFS standard vols get rejected */
1498 if (vfs_isrdwr(mp
)) {
1503 printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n");
1505 /* Treat it as if it's read-only and not writeable */
1506 hfsmp
->hfs_flags
|= HFS_READ_ONLY
;
1507 hfsmp
->hfs_flags
&= ~HFS_WRITEABLE_MEDIA
;
1509 /* If only journal replay is requested, exit immediately */
1510 if (journal_replay_only
) {
1515 if ((vfs_flags(mp
) & MNT_ROOTFS
)) {
1516 retval
= EINVAL
; /* Cannot root from HFS standard disks */
1519 /* HFS disks can only use 512 byte physical blocks */
1520 if (log_blksize
> kHFSBlockSize
) {
1521 log_blksize
= kHFSBlockSize
;
1522 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
, (caddr_t
)&log_blksize
, FWRITE
, context
)) {
1526 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
, (caddr_t
)&log_blkcnt
, 0, context
)) {
1530 hfsmp
->hfs_logical_block_size
= log_blksize
;
1531 hfsmp
->hfs_logical_block_count
= log_blkcnt
;
1532 hfsmp
->hfs_physical_block_size
= log_blksize
;
1533 hfsmp
->hfs_log_per_phys
= 1;
1536 hfsmp
->hfs_encoding
= args
->hfs_encoding
;
1537 HFSTOVCB(hfsmp
)->volumeNameEncodingHint
= args
->hfs_encoding
;
1539 /* establish the timezone */
1540 gTimeZone
= args
->hfs_timezone
;
1543 retval
= hfs_getconverter(hfsmp
->hfs_encoding
, &hfsmp
->hfs_get_unicode
,
1544 &hfsmp
->hfs_get_hfsname
);
1548 retval
= hfs_MountHFSVolume(hfsmp
, mdbp
, p
);
1550 (void) hfs_relconverter(hfsmp
->hfs_encoding
);
1552 } else /* Mount an HFS Plus disk */ {
1553 HFSPlusVolumeHeader
*vhp
;
1554 off_t embeddedOffset
;
1555 int jnl_disable
= 0;
1557 /* Get the embedded Volume Header */
1558 if (SWAP_BE16(mdbp
->drEmbedSigWord
) == kHFSPlusSigWord
) {
1559 embeddedOffset
= SWAP_BE16(mdbp
->drAlBlSt
) * kHFSBlockSize
;
1560 embeddedOffset
+= (u_int64_t
)SWAP_BE16(mdbp
->drEmbedExtent
.startBlock
) *
1561 (u_int64_t
)SWAP_BE32(mdbp
->drAlBlkSiz
);
1564 * If the embedded volume doesn't start on a block
1565 * boundary, then switch the device to a 512-byte
1566 * block size so everything will line up on a block
1569 if ((embeddedOffset
% log_blksize
) != 0) {
1570 printf("hfs_mountfs: embedded volume offset not"
1571 " a multiple of physical block size (%d);"
1572 " switching to 512\n", log_blksize
);
1574 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
,
1575 (caddr_t
)&log_blksize
, FWRITE
, context
)) {
1577 if (HFS_MOUNT_DEBUG
) {
1578 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n");
1583 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
,
1584 (caddr_t
)&log_blkcnt
, 0, context
)) {
1585 if (HFS_MOUNT_DEBUG
) {
1586 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n");
1591 /* Note: relative block count adjustment */
1592 hfsmp
->hfs_logical_block_count
*=
1593 hfsmp
->hfs_logical_block_size
/ log_blksize
;
1595 /* Update logical /physical block size */
1596 hfsmp
->hfs_logical_block_size
= log_blksize
;
1597 hfsmp
->hfs_physical_block_size
= log_blksize
;
1598 phys_blksize
= log_blksize
;
1599 hfsmp
->hfs_log_per_phys
= 1;
1602 disksize
= (u_int64_t
)SWAP_BE16(mdbp
->drEmbedExtent
.blockCount
) *
1603 (u_int64_t
)SWAP_BE32(mdbp
->drAlBlkSiz
);
1605 hfsmp
->hfs_logical_block_count
= disksize
/ log_blksize
;
1607 mdb_offset
= (daddr64_t
)((embeddedOffset
/ log_blksize
) + HFS_PRI_SECTOR(log_blksize
));
1608 retval
= (int)buf_meta_bread(devvp
, HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
),
1609 phys_blksize
, cred
, &bp
);
1611 if (HFS_MOUNT_DEBUG
) {
1612 printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval
);
1616 bcopy((char *)buf_dataptr(bp
) + HFS_PRI_OFFSET(phys_blksize
), mdbp
, 512);
1619 vhp
= (HFSPlusVolumeHeader
*) mdbp
;
1621 } else /* pure HFS+ */ {
1623 vhp
= (HFSPlusVolumeHeader
*) mdbp
;
1627 hfs_root_unmounted_cleanly
= (SWAP_BE32(vhp
->attributes
) & kHFSVolumeUnmountedMask
) != 0;
1631 * On inconsistent disks, do not allow read-write mount
1632 * unless it is the boot volume being mounted. We also
1633 * always want to replay the journal if the journal_replay_only
1634 * flag is set because that will (most likely) get the
1635 * disk into a consistent state before fsck_hfs starts
1638 if ( !(vfs_flags(mp
) & MNT_ROOTFS
)
1639 && (SWAP_BE32(vhp
->attributes
) & kHFSVolumeInconsistentMask
)
1640 && !journal_replay_only
1641 && !(hfsmp
->hfs_flags
& HFS_READ_ONLY
)) {
1643 if (HFS_MOUNT_DEBUG
) {
1644 printf("hfs_mountfs: failed to mount non-root inconsistent disk\n");
1655 if (args
!= NULL
&& (args
->flags
& HFSFSMNT_EXTENDED_ARGS
) &&
1656 args
->journal_disable
) {
1661 // We only initialize the journal here if the last person
1662 // to mount this volume was journaling aware. Otherwise
1663 // we delay journal initialization until later at the end
1664 // of hfs_MountHFSPlusVolume() because the last person who
1665 // mounted it could have messed things up behind our back
1666 // (so we need to go find the .journal file, make sure it's
1667 // the right size, re-sync up if it was moved, etc).
1669 if ( (SWAP_BE32(vhp
->lastMountedVersion
) == kHFSJMountVersion
)
1670 && (SWAP_BE32(vhp
->attributes
) & kHFSVolumeJournaledMask
)
1673 // if we're able to init the journal, mark the mount
1674 // point as journaled.
1676 if ((retval
= hfs_early_journal_init(hfsmp
, vhp
, args
, embeddedOffset
, mdb_offset
, mdbp
, cred
)) == 0) {
1677 vfs_setflags(mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
1679 if (retval
== EROFS
) {
1680 // EROFS is a special error code that means the volume has an external
1681 // journal which we couldn't find. in that case we do not want to
1682 // rewrite the volume header - we'll just refuse to mount the volume.
1683 if (HFS_MOUNT_DEBUG
) {
1684 printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n");
1690 // if the journal failed to open, then set the lastMountedVersion
1691 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1692 // of just bailing out because the volume is journaled.
1694 if (HFS_MOUNT_DEBUG
) {
1695 printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n");
1698 HFSPlusVolumeHeader
*jvhp
;
1700 hfsmp
->hfs_flags
|= HFS_NEED_JNL_RESET
;
1702 if (mdb_offset
== 0) {
1703 mdb_offset
= (daddr64_t
)((embeddedOffset
/ log_blksize
) + HFS_PRI_SECTOR(log_blksize
));
1707 retval
= (int)buf_meta_bread(devvp
,
1708 HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
),
1709 phys_blksize
, cred
, &bp
);
1711 jvhp
= (HFSPlusVolumeHeader
*)(buf_dataptr(bp
) + HFS_PRI_OFFSET(phys_blksize
));
1713 if (SWAP_BE16(jvhp
->signature
) == kHFSPlusSigWord
|| SWAP_BE16(jvhp
->signature
) == kHFSXSigWord
) {
1714 printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n");
1715 jvhp
->lastMountedVersion
= SWAP_BE32(kFSKMountVersion
);
1723 // clear this so the error exit path won't try to use it
1728 // if this isn't the root device just bail out.
1729 // If it is the root device we just continue on
1730 // in the hopes that fsck_hfs will be able to
1731 // fix any damage that exists on the volume.
1732 if ( !(vfs_flags(mp
) & MNT_ROOTFS
)) {
1733 if (HFS_MOUNT_DEBUG
) {
1734 printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n");
1743 /* Either the journal is replayed successfully, or there
1744 * was nothing to replay, or no journal exists. In any case,
1747 if (journal_replay_only
) {
1752 (void) hfs_getconverter(0, &hfsmp
->hfs_get_unicode
, &hfsmp
->hfs_get_hfsname
);
1754 retval
= hfs_MountHFSPlusVolume(hfsmp
, vhp
, embeddedOffset
, disksize
, p
, args
, cred
);
1756 * If the backend didn't like our physical blocksize
1757 * then retry with physical blocksize of 512.
1759 if ((retval
== ENXIO
) && (log_blksize
> 512) && (log_blksize
!= minblksize
)) {
1760 printf("hfs_mountfs: could not use physical block size "
1761 "(%d) switching to 512\n", log_blksize
);
1763 if (VNOP_IOCTL(devvp
, DKIOCSETBLOCKSIZE
, (caddr_t
)&log_blksize
, FWRITE
, context
)) {
1764 if (HFS_MOUNT_DEBUG
) {
1765 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n");
1770 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
, (caddr_t
)&log_blkcnt
, 0, context
)) {
1771 if (HFS_MOUNT_DEBUG
) {
1772 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n");
1777 devvp
->v_specsize
= log_blksize
;
1778 /* Note: relative block count adjustment (in case this is an embedded volume). */
1779 hfsmp
->hfs_logical_block_count
*= hfsmp
->hfs_logical_block_size
/ log_blksize
;
1780 hfsmp
->hfs_logical_block_size
= log_blksize
;
1781 hfsmp
->hfs_log_per_phys
= hfsmp
->hfs_physical_block_size
/ log_blksize
;
1783 if (hfsmp
->jnl
&& hfsmp
->jvp
== devvp
) {
1784 // close and re-open this with the new block size
1785 journal_close(hfsmp
->jnl
);
1787 if (hfs_early_journal_init(hfsmp
, vhp
, args
, embeddedOffset
, mdb_offset
, mdbp
, cred
) == 0) {
1788 vfs_setflags(mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
1790 // if the journal failed to open, then set the lastMountedVersion
1791 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1792 // of just bailing out because the volume is journaled.
1794 if (HFS_MOUNT_DEBUG
) {
1795 printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n");
1797 HFSPlusVolumeHeader
*jvhp
;
1799 hfsmp
->hfs_flags
|= HFS_NEED_JNL_RESET
;
1801 if (mdb_offset
== 0) {
1802 mdb_offset
= (daddr64_t
)((embeddedOffset
/ log_blksize
) + HFS_PRI_SECTOR(log_blksize
));
1806 retval
= (int)buf_meta_bread(devvp
, HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
),
1807 phys_blksize
, cred
, &bp
);
1809 jvhp
= (HFSPlusVolumeHeader
*)(buf_dataptr(bp
) + HFS_PRI_OFFSET(phys_blksize
));
1811 if (SWAP_BE16(jvhp
->signature
) == kHFSPlusSigWord
|| SWAP_BE16(jvhp
->signature
) == kHFSXSigWord
) {
1812 printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n");
1813 jvhp
->lastMountedVersion
= SWAP_BE32(kFSKMountVersion
);
1821 // clear this so the error exit path won't try to use it
1826 // if this isn't the root device just bail out.
1827 // If it is the root device we just continue on
1828 // in the hopes that fsck_hfs will be able to
1829 // fix any damage that exists on the volume.
1830 if ( !(vfs_flags(mp
) & MNT_ROOTFS
)) {
1831 if (HFS_MOUNT_DEBUG
) {
1832 printf("hfs_mountfs: hfs_early_journal_init (2) failed \n");
1840 /* Try again with a smaller block size... */
1841 retval
= hfs_MountHFSPlusVolume(hfsmp
, vhp
, embeddedOffset
, disksize
, p
, args
, cred
);
1842 if (retval
&& HFS_MOUNT_DEBUG
) {
1843 printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval
);
1847 (void) hfs_relconverter(0);
1850 // save off a snapshot of the mtime from the previous mount
1852 hfsmp
->hfs_last_mounted_mtime
= hfsmp
->hfs_mtime
;
1855 if (HFS_MOUNT_DEBUG
) {
1856 printf("hfs_mountfs: encountered failure %d \n", retval
);
1861 mp
->mnt_vfsstat
.f_fsid
.val
[0] = (long)dev
;
1862 mp
->mnt_vfsstat
.f_fsid
.val
[1] = vfs_typenum(mp
);
1863 vfs_setmaxsymlen(mp
, 0);
1865 mp
->mnt_vtable
->vfc_vfsflags
|= VFC_VFSNATIVEXATTR
;
1867 mp
->mnt_kern_flag
|= MNTK_NAMED_STREAMS
;
1869 if (!(hfsmp
->hfs_flags
& HFS_STANDARD
)) {
1870 /* Tell VFS that we support directory hard links. */
1871 mp
->mnt_vtable
->vfc_vfsflags
|= VFC_VFSDIRLINKS
;
1873 /* HFS standard doesn't support extended readdir! */
1874 mount_set_noreaddirext (mp
);
1879 * Set the free space warning levels for a non-root volume:
1881 * Set the "danger" limit to 1% of the volume size or 100MB, whichever
1882 * is less. Set the "warning" limit to 2% of the volume size or 150MB,
1883 * whichever is less. And last, set the "desired" freespace level to
1884 * to 3% of the volume size or 200MB, whichever is less.
1886 hfsmp
->hfs_freespace_notify_dangerlimit
=
1887 MIN(HFS_VERYLOWDISKTRIGGERLEVEL
/ HFSTOVCB(hfsmp
)->blockSize
,
1888 (HFSTOVCB(hfsmp
)->totalBlocks
/ 100) * HFS_VERYLOWDISKTRIGGERFRACTION
);
1889 hfsmp
->hfs_freespace_notify_warninglimit
=
1890 MIN(HFS_LOWDISKTRIGGERLEVEL
/ HFSTOVCB(hfsmp
)->blockSize
,
1891 (HFSTOVCB(hfsmp
)->totalBlocks
/ 100) * HFS_LOWDISKTRIGGERFRACTION
);
1892 hfsmp
->hfs_freespace_notify_desiredlevel
=
1893 MIN(HFS_LOWDISKSHUTOFFLEVEL
/ HFSTOVCB(hfsmp
)->blockSize
,
1894 (HFSTOVCB(hfsmp
)->totalBlocks
/ 100) * HFS_LOWDISKSHUTOFFFRACTION
);
1897 * Set the free space warning levels for the root volume:
1899 * Set the "danger" limit to 5% of the volume size or 512MB, whichever
1900 * is less. Set the "warning" limit to 10% of the volume size or 1GB,
1901 * whichever is less. And last, set the "desired" freespace level to
1902 * to 11% of the volume size or 1.25GB, whichever is less.
1904 hfsmp
->hfs_freespace_notify_dangerlimit
=
1905 MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL
/ HFSTOVCB(hfsmp
)->blockSize
,
1906 (HFSTOVCB(hfsmp
)->totalBlocks
/ 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION
);
1907 hfsmp
->hfs_freespace_notify_warninglimit
=
1908 MIN(HFS_ROOTLOWDISKTRIGGERLEVEL
/ HFSTOVCB(hfsmp
)->blockSize
,
1909 (HFSTOVCB(hfsmp
)->totalBlocks
/ 100) * HFS_ROOTLOWDISKTRIGGERFRACTION
);
1910 hfsmp
->hfs_freespace_notify_desiredlevel
=
1911 MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL
/ HFSTOVCB(hfsmp
)->blockSize
,
1912 (HFSTOVCB(hfsmp
)->totalBlocks
/ 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION
);
1915 /* Check if the file system exists on virtual device, like disk image */
1916 if (VNOP_IOCTL(devvp
, DKIOCISVIRTUAL
, (caddr_t
)&isvirtual
, 0, context
) == 0) {
1918 hfsmp
->hfs_flags
|= HFS_VIRTUAL_DEVICE
;
1922 /* do not allow ejectability checks on the root device */
1924 if ((hfsmp
->hfs_flags
& HFS_VIRTUAL_DEVICE
) == 0 &&
1925 IOBSDIsMediaEjectable(mp
->mnt_vfsstat
.f_mntfromname
)) {
1926 hfsmp
->hfs_max_pending_io
= 4096*1024; // a reasonable value to start with.
1927 hfsmp
->hfs_syncer
= thread_call_allocate(hfs_syncer
, hfsmp
);
1928 if (hfsmp
->hfs_syncer
== NULL
) {
1929 printf("hfs: failed to allocate syncer thread callback for %s (%s)\n",
1930 mp
->mnt_vfsstat
.f_mntfromname
, mp
->mnt_vfsstat
.f_mntonname
);
1935 #if CONFIG_HFS_ALLOC_RBTREE
1937 * We spawn a thread to create the pair of red-black trees for this volume.
1938 * However, in so doing, we must be careful to ensure that if this thread is still
1939 * running after mount has finished, it doesn't interfere with an unmount. Specifically,
1940 * we'll need to set a bit that indicates we're in progress building the trees here.
1941 * Unmount will check for this bit, and then if it's set, mark a corresponding bit that
1942 * notifies the tree generation code that an unmount is waiting. Also mark the bit that
1943 * indicates the tree is live and operating.
1945 * Only do this if we're operating on a read-write mount (we wouldn't care for read-only).
1948 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0) {
1949 hfsmp
->extent_tree_flags
|= (HFS_ALLOC_TREEBUILD_INFLIGHT
| HFS_ALLOC_RB_ENABLED
);
1951 /* Initialize EOF counter so that the thread can assume it started at initial values */
1952 hfsmp
->offset_block_end
= 0;
1955 kernel_thread_start ((thread_continue_t
) hfs_initialize_allocator
, hfsmp
, &allocator_thread
);
1956 thread_deallocate(allocator_thread
);
1962 * Start looking for free space to drop below this level and generate a
1963 * warning immediately if needed:
1965 hfsmp
->hfs_notification_conditions
= 0;
1966 hfs_generate_volume_notifications(hfsmp
);
1969 (void) hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, 0);
1980 if (hfsmp
&& hfsmp
->jvp
&& hfsmp
->jvp
!= hfsmp
->hfs_devvp
) {
1981 vnode_clearmountedon(hfsmp
->jvp
);
1982 (void)VNOP_CLOSE(hfsmp
->jvp
, ronly
? FREAD
: FREAD
|FWRITE
, vfs_context_kernel());
1986 if (hfsmp
->hfs_devvp
) {
1987 vnode_rele(hfsmp
->hfs_devvp
);
1989 hfs_delete_chash(hfsmp
);
1991 FREE(hfsmp
, M_HFSMNT
);
1992 vfs_setfsprivate(mp
, NULL
);
1999 * Make a filesystem operational.
2000 * Nothing to do at the moment.
2004 hfs_start(__unused
struct mount
*mp
, __unused
int flags
, __unused vfs_context_t context
)
2011 * unmount system call
2014 hfs_unmount(struct mount
*mp
, int mntflags
, vfs_context_t context
)
2016 struct proc
*p
= vfs_context_proc(context
);
2017 struct hfsmount
*hfsmp
= VFSTOHFS(mp
);
2018 int retval
= E_NONE
;
2026 if (mntflags
& MNT_FORCE
) {
2027 flags
|= FORCECLOSE
;
2031 if ((retval
= hfs_flushfiles(mp
, flags
, p
)) && !force
)
2034 if (hfsmp
->hfs_flags
& HFS_METADATA_ZONE
)
2035 (void) hfs_recording_suspend(hfsmp
);
2038 * Cancel any pending timers for this volume. Then wait for any timers
2039 * which have fired, but whose callbacks have not yet completed.
2041 if (hfsmp
->hfs_syncer
)
2043 struct timespec ts
= {0, 100000000}; /* 0.1 seconds */
2046 * Cancel any timers that have been scheduled, but have not
2047 * fired yet. NOTE: The kernel considers a timer complete as
2048 * soon as it starts your callback, so the kernel does not
2049 * keep track of the number of callbacks in progress.
2051 if (thread_call_cancel(hfsmp
->hfs_syncer
))
2052 OSDecrementAtomic((volatile SInt32
*)&hfsmp
->hfs_sync_incomplete
);
2053 thread_call_free(hfsmp
->hfs_syncer
);
2054 hfsmp
->hfs_syncer
= NULL
;
2057 * This waits for all of the callbacks that were entered before
2058 * we did thread_call_cancel above, but have not completed yet.
2060 while(hfsmp
->hfs_sync_incomplete
> 0)
2062 msleep((caddr_t
)&hfsmp
->hfs_sync_incomplete
, NULL
, PWAIT
, "hfs_unmount", &ts
);
2065 if (hfsmp
->hfs_sync_incomplete
< 0)
2066 panic("hfs_unmount: pm_sync_incomplete underflow!\n");
2069 #if CONFIG_HFS_ALLOC_RBTREE
2070 rb_used
= hfs_teardown_allocator(hfsmp
);
2074 * Flush out the b-trees, volume bitmap and Volume Header
2076 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0) {
2077 retval
= hfs_start_transaction(hfsmp
);
2080 } else if (!force
) {
2084 if (hfsmp
->hfs_startup_vp
) {
2085 (void) hfs_lock(VTOC(hfsmp
->hfs_startup_vp
), HFS_EXCLUSIVE_LOCK
);
2086 retval
= hfs_fsync(hfsmp
->hfs_startup_vp
, MNT_WAIT
, 0, p
);
2087 hfs_unlock(VTOC(hfsmp
->hfs_startup_vp
));
2088 if (retval
&& !force
)
2092 if (hfsmp
->hfs_attribute_vp
) {
2093 (void) hfs_lock(VTOC(hfsmp
->hfs_attribute_vp
), HFS_EXCLUSIVE_LOCK
);
2094 retval
= hfs_fsync(hfsmp
->hfs_attribute_vp
, MNT_WAIT
, 0, p
);
2095 hfs_unlock(VTOC(hfsmp
->hfs_attribute_vp
));
2096 if (retval
&& !force
)
2100 (void) hfs_lock(VTOC(hfsmp
->hfs_catalog_vp
), HFS_EXCLUSIVE_LOCK
);
2101 retval
= hfs_fsync(hfsmp
->hfs_catalog_vp
, MNT_WAIT
, 0, p
);
2102 hfs_unlock(VTOC(hfsmp
->hfs_catalog_vp
));
2103 if (retval
&& !force
)
2106 (void) hfs_lock(VTOC(hfsmp
->hfs_extents_vp
), HFS_EXCLUSIVE_LOCK
);
2107 retval
= hfs_fsync(hfsmp
->hfs_extents_vp
, MNT_WAIT
, 0, p
);
2108 hfs_unlock(VTOC(hfsmp
->hfs_extents_vp
));
2109 if (retval
&& !force
)
2112 if (hfsmp
->hfs_allocation_vp
) {
2113 (void) hfs_lock(VTOC(hfsmp
->hfs_allocation_vp
), HFS_EXCLUSIVE_LOCK
);
2114 retval
= hfs_fsync(hfsmp
->hfs_allocation_vp
, MNT_WAIT
, 0, p
);
2115 hfs_unlock(VTOC(hfsmp
->hfs_allocation_vp
));
2116 if (retval
&& !force
)
2120 if (hfsmp
->hfc_filevp
&& vnode_issystem(hfsmp
->hfc_filevp
)) {
2121 retval
= hfs_fsync(hfsmp
->hfc_filevp
, MNT_WAIT
, 0, p
);
2122 if (retval
&& !force
)
2126 /* If runtime corruption was detected, indicate that the volume
2127 * was not unmounted cleanly.
2129 if (hfsmp
->vcbAtrb
& kHFSVolumeInconsistentMask
) {
2130 HFSTOVCB(hfsmp
)->vcbAtrb
&= ~kHFSVolumeUnmountedMask
;
2132 HFSTOVCB(hfsmp
)->vcbAtrb
|= kHFSVolumeUnmountedMask
;
2137 /* If the rb-tree was live, just set min_start to 0 */
2138 hfsmp
->nextAllocation
= 0;
2141 if (hfsmp
->hfs_flags
& HFS_HAS_SPARSE_DEVICE
) {
2143 u_int32_t min_start
= hfsmp
->totalBlocks
;
2145 // set the nextAllocation pointer to the smallest free block number
2146 // we've seen so on the next mount we won't rescan unnecessarily
2147 lck_spin_lock(&hfsmp
->vcbFreeExtLock
);
2148 for(i
=0; i
< (int)hfsmp
->vcbFreeExtCnt
; i
++) {
2149 if (hfsmp
->vcbFreeExt
[i
].startBlock
< min_start
) {
2150 min_start
= hfsmp
->vcbFreeExt
[i
].startBlock
;
2153 lck_spin_unlock(&hfsmp
->vcbFreeExtLock
);
2154 if (min_start
< hfsmp
->nextAllocation
) {
2155 hfsmp
->nextAllocation
= min_start
;
2161 retval
= hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, 0);
2163 HFSTOVCB(hfsmp
)->vcbAtrb
&= ~kHFSVolumeUnmountedMask
;
2165 goto err_exit
; /* could not flush everything */
2169 hfs_end_transaction(hfsmp
);
2175 hfs_journal_flush(hfsmp
, FALSE
);
2179 * Invalidate our caches and release metadata vnodes
2181 (void) hfsUnmount(hfsmp
, p
);
2183 if (HFSTOVCB(hfsmp
)->vcbSigWord
== kHFSSigWord
)
2184 (void) hfs_relconverter(hfsmp
->hfs_encoding
);
2188 journal_close(hfsmp
->jnl
);
2192 VNOP_FSYNC(hfsmp
->hfs_devvp
, MNT_WAIT
, context
);
2194 if (hfsmp
->jvp
&& hfsmp
->jvp
!= hfsmp
->hfs_devvp
) {
2195 vnode_clearmountedon(hfsmp
->jvp
);
2196 retval
= VNOP_CLOSE(hfsmp
->jvp
,
2197 hfsmp
->hfs_flags
& HFS_READ_ONLY
? FREAD
: FREAD
|FWRITE
,
2198 vfs_context_kernel());
2199 vnode_put(hfsmp
->jvp
);
2205 * Last chance to dump unreferenced system files.
2207 (void) vflush(mp
, NULLVP
, FORCECLOSE
);
2210 /* Drop our reference on the backing fs (if any). */
2211 if ((hfsmp
->hfs_flags
& HFS_HAS_SPARSE_DEVICE
) && hfsmp
->hfs_backingfs_rootvp
) {
2212 struct vnode
* tmpvp
;
2214 hfsmp
->hfs_flags
&= ~HFS_HAS_SPARSE_DEVICE
;
2215 tmpvp
= hfsmp
->hfs_backingfs_rootvp
;
2216 hfsmp
->hfs_backingfs_rootvp
= NULLVP
;
2219 #endif /* HFS_SPARSE_DEV */
2220 lck_mtx_destroy(&hfsmp
->hfc_mutex
, hfs_mutex_group
);
2221 lck_spin_destroy(&hfsmp
->vcbFreeExtLock
, hfs_spinlock_group
);
2222 vnode_rele(hfsmp
->hfs_devvp
);
2224 hfs_delete_chash(hfsmp
);
2225 FREE(hfsmp
, M_HFSMNT
);
2231 hfs_end_transaction(hfsmp
);
2238 * Return the root of a filesystem.
2241 hfs_vfs_root(struct mount
*mp
, struct vnode
**vpp
, __unused vfs_context_t context
)
2243 return hfs_vget(VFSTOHFS(mp
), (cnid_t
)kHFSRootFolderID
, vpp
, 1, 0);
2248 * Do operations associated with quotas
2252 hfs_quotactl(__unused
struct mount
*mp
, __unused
int cmds
, __unused uid_t uid
, __unused caddr_t datap
, __unused vfs_context_t context
)
2258 hfs_quotactl(struct mount
*mp
, int cmds
, uid_t uid
, caddr_t datap
, vfs_context_t context
)
2260 struct proc
*p
= vfs_context_proc(context
);
2261 int cmd
, type
, error
;
2264 uid
= kauth_cred_getuid(vfs_context_ucred(context
));
2265 cmd
= cmds
>> SUBCMDSHIFT
;
2272 if (uid
== kauth_cred_getuid(vfs_context_ucred(context
)))
2276 if ( (error
= vfs_context_suser(context
)) )
2280 type
= cmds
& SUBCMDMASK
;
2281 if ((u_int
)type
>= MAXQUOTAS
)
2283 if (vfs_busy(mp
, LK_NOWAIT
))
2289 error
= hfs_quotaon(p
, mp
, type
, datap
);
2293 error
= hfs_quotaoff(p
, mp
, type
);
2297 error
= hfs_setquota(mp
, uid
, type
, datap
);
2301 error
= hfs_setuse(mp
, uid
, type
, datap
);
2305 error
= hfs_getquota(mp
, uid
, type
, datap
);
2309 error
= hfs_qsync(mp
);
2313 error
= hfs_quotastat(mp
, type
, datap
);
2326 /* Subtype is composite of bits */
2327 #define HFS_SUBTYPE_JOURNALED 0x01
2328 #define HFS_SUBTYPE_CASESENSITIVE 0x02
2329 /* bits 2 - 6 reserved */
2330 #define HFS_SUBTYPE_STANDARDHFS 0x80
2333 * Get file system statistics.
2336 hfs_statfs(struct mount
*mp
, register struct vfsstatfs
*sbp
, __unused vfs_context_t context
)
2338 ExtendedVCB
*vcb
= VFSTOVCB(mp
);
2339 struct hfsmount
*hfsmp
= VFSTOHFS(mp
);
2340 u_int32_t freeCNIDs
;
2341 u_int16_t subtype
= 0;
2343 freeCNIDs
= (u_int32_t
)0xFFFFFFFF - (u_int32_t
)vcb
->vcbNxtCNID
;
2345 sbp
->f_bsize
= (u_int32_t
)vcb
->blockSize
;
2346 sbp
->f_iosize
= (size_t)cluster_max_io_size(mp
, 0);
2347 sbp
->f_blocks
= (u_int64_t
)((u_int32_t
)vcb
->totalBlocks
);
2348 sbp
->f_bfree
= (u_int64_t
)((u_int32_t
)hfs_freeblks(hfsmp
, 0));
2349 sbp
->f_bavail
= (u_int64_t
)((u_int32_t
)hfs_freeblks(hfsmp
, 1));
2350 sbp
->f_files
= (u_int64_t
)((u_int32_t
)(vcb
->totalBlocks
- 2)); /* max files is constrained by total blocks */
2351 sbp
->f_ffree
= (u_int64_t
)((u_int32_t
)(MIN(freeCNIDs
, sbp
->f_bavail
)));
2354 * Subtypes (flavors) for HFS
2355 * 0: Mac OS Extended
2356 * 1: Mac OS Extended (Journaled)
2357 * 2: Mac OS Extended (Case Sensitive)
2358 * 3: Mac OS Extended (Case Sensitive, Journaled)
2360 * 128: Mac OS Standard
2363 if (hfsmp
->hfs_flags
& HFS_STANDARD
) {
2364 subtype
= HFS_SUBTYPE_STANDARDHFS
;
2365 } else /* HFS Plus */ {
2367 subtype
|= HFS_SUBTYPE_JOURNALED
;
2368 if (hfsmp
->hfs_flags
& HFS_CASE_SENSITIVE
)
2369 subtype
|= HFS_SUBTYPE_CASESENSITIVE
;
2371 sbp
->f_fssubtype
= subtype
;
2378 // XXXdbg -- this is a callback to be used by the journal to
2379 // get meta data blocks flushed out to disk.
2381 // XXXdbg -- be smarter and don't flush *every* block on each
2382 // call. try to only flush some so we don't wind up
2383 // being too synchronous.
2387 hfs_sync_metadata(void *arg
)
2389 struct mount
*mp
= (struct mount
*)arg
;
2390 struct hfsmount
*hfsmp
;
2394 daddr64_t priIDSector
;
2395 hfsmp
= VFSTOHFS(mp
);
2396 vcb
= HFSTOVCB(hfsmp
);
2398 // now make sure the super block is flushed
2399 priIDSector
= (daddr64_t
)((vcb
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
2400 HFS_PRI_SECTOR(hfsmp
->hfs_logical_block_size
));
2402 retval
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
2403 HFS_PHYSBLK_ROUNDDOWN(priIDSector
, hfsmp
->hfs_log_per_phys
),
2404 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp
);
2405 if ((retval
!= 0 ) && (retval
!= ENXIO
)) {
2406 printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
2407 (int)priIDSector
, retval
);
2410 if (retval
== 0 && ((buf_flags(bp
) & (B_DELWRI
| B_LOCKED
)) == B_DELWRI
)) {
2416 // the alternate super block...
2417 // XXXdbg - we probably don't need to do this each and every time.
2418 // hfs_btreeio.c:FlushAlternate() should flag when it was
2420 if (hfsmp
->hfs_alt_id_sector
) {
2421 retval
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
2422 HFS_PHYSBLK_ROUNDDOWN(hfsmp
->hfs_alt_id_sector
, hfsmp
->hfs_log_per_phys
),
2423 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp
);
2424 if (retval
== 0 && ((buf_flags(bp
) & (B_DELWRI
| B_LOCKED
)) == B_DELWRI
)) {
2433 struct hfs_sync_cargs
{
2442 hfs_sync_callback(struct vnode
*vp
, void *cargs
)
2445 struct hfs_sync_cargs
*args
;
2448 args
= (struct hfs_sync_cargs
*)cargs
;
2450 if (hfs_lock(VTOC(vp
), HFS_EXCLUSIVE_LOCK
) != 0) {
2451 return (VNODE_RETURNED
);
2455 if ((cp
->c_flag
& C_MODIFIED
) ||
2456 (cp
->c_touch_acctime
| cp
->c_touch_chgtime
| cp
->c_touch_modtime
) ||
2457 vnode_hasdirtyblks(vp
)) {
2458 error
= hfs_fsync(vp
, args
->waitfor
, 0, args
->p
);
2461 args
->error
= error
;
2464 return (VNODE_RETURNED
);
2470 * Go through the disk queues to initiate sandbagged IO;
2471 * go through the inodes to write those that have been modified;
2472 * initiate the writing of the super block if it has been modified.
2474 * Note: we are always called with the filesystem marked `MPBUSY'.
2477 hfs_sync(struct mount
*mp
, int waitfor
, vfs_context_t context
)
2479 struct proc
*p
= vfs_context_proc(context
);
2481 struct hfsmount
*hfsmp
;
2483 struct vnode
*meta_vp
[4];
2485 int error
, allerror
= 0;
2486 struct hfs_sync_cargs args
;
2488 hfsmp
= VFSTOHFS(mp
);
2491 * hfs_changefs might be manipulating vnodes so back off
2493 if (hfsmp
->hfs_flags
& HFS_IN_CHANGEFS
)
2496 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
)
2499 /* skip over frozen volumes */
2500 if (!lck_rw_try_lock_shared(&hfsmp
->hfs_insync
))
2503 args
.cred
= kauth_cred_get();
2504 args
.waitfor
= waitfor
;
2508 * hfs_sync_callback will be called for each vnode
2509 * hung off of this mount point... the vnode will be
2510 * properly referenced and unreferenced around the callback
2512 vnode_iterate(mp
, 0, hfs_sync_callback
, (void *)&args
);
2515 allerror
= args
.error
;
2517 vcb
= HFSTOVCB(hfsmp
);
2519 meta_vp
[0] = vcb
->extentsRefNum
;
2520 meta_vp
[1] = vcb
->catalogRefNum
;
2521 meta_vp
[2] = vcb
->allocationsRefNum
; /* This is NULL for standard HFS */
2522 meta_vp
[3] = hfsmp
->hfs_attribute_vp
; /* Optional file */
2524 /* Now sync our three metadata files */
2525 for (i
= 0; i
< 4; ++i
) {
2529 if ((btvp
==0) || (vnode_mount(btvp
) != mp
))
2532 /* XXX use hfs_systemfile_lock instead ? */
2533 (void) hfs_lock(VTOC(btvp
), HFS_EXCLUSIVE_LOCK
);
2536 if (((cp
->c_flag
& C_MODIFIED
) == 0) &&
2537 (cp
->c_touch_acctime
== 0) &&
2538 (cp
->c_touch_chgtime
== 0) &&
2539 (cp
->c_touch_modtime
== 0) &&
2540 vnode_hasdirtyblks(btvp
) == 0) {
2541 hfs_unlock(VTOC(btvp
));
2544 error
= vnode_get(btvp
);
2546 hfs_unlock(VTOC(btvp
));
2549 if ((error
= hfs_fsync(btvp
, waitfor
, 0, p
)))
2557 * Force stale file system control information to be flushed.
2559 if (vcb
->vcbSigWord
== kHFSSigWord
) {
2560 if ((error
= VNOP_FSYNC(hfsmp
->hfs_devvp
, waitfor
, context
))) {
2568 hfs_hotfilesync(hfsmp
, vfs_context_kernel());
2571 * Write back modified superblock.
2573 if (IsVCBDirty(vcb
)) {
2574 error
= hfs_flushvolumeheader(hfsmp
, waitfor
, 0);
2580 hfs_journal_flush(hfsmp
, FALSE
);
2588 clock_get_calendar_microtime(&secs
, &usecs
);
2589 now
= ((uint64_t)secs
* 1000000ULL) + (uint64_t)usecs
;
2590 hfsmp
->hfs_last_sync_time
= now
;
2593 lck_rw_unlock_shared(&hfsmp
->hfs_insync
);
2599 * File handle to vnode
2601 * Have to be really careful about stale file handles:
2602 * - check that the cnode id is valid
2603 * - call hfs_vget() to get the locked cnode
2604 * - check for an unallocated cnode (i_mode == 0)
2605 * - check that the given client host has export rights and return
2606 * those rights via. exflagsp and credanonp
2609 hfs_fhtovp(struct mount
*mp
, int fhlen
, unsigned char *fhp
, struct vnode
**vpp
, __unused vfs_context_t context
)
2611 struct hfsfid
*hfsfhp
;
2616 hfsfhp
= (struct hfsfid
*)fhp
;
2618 if (fhlen
< (int)sizeof(struct hfsfid
))
2621 result
= hfs_vget(VFSTOHFS(mp
), ntohl(hfsfhp
->hfsfid_cnid
), &nvp
, 0, 0);
2623 if (result
== ENOENT
)
2629 * We used to use the create time as the gen id of the file handle,
2630 * but it is not static enough because it can change at any point
2631 * via system calls. We still don't have another volume ID or other
2632 * unique identifier to use for a generation ID across reboots that
2633 * persists until the file is removed. Using only the CNID exposes
2634 * us to the potential wrap-around case, but as of 2/2008, it would take
2635 * over 2 months to wrap around if the machine did nothing but allocate
2636 * CNIDs. Using some kind of wrap counter would only be effective if
2637 * each file had the wrap counter associated with it. For now,
2638 * we use only the CNID to identify the file as it's good enough.
2643 hfs_unlock(VTOC(nvp
));
2649 * Vnode pointer to File handle
2653 hfs_vptofh(struct vnode
*vp
, int *fhlenp
, unsigned char *fhp
, __unused vfs_context_t context
)
2656 struct hfsfid
*hfsfhp
;
2658 if (ISHFS(VTOVCB(vp
)))
2659 return (ENOTSUP
); /* hfs standard is not exportable */
2661 if (*fhlenp
< (int)sizeof(struct hfsfid
))
2665 hfsfhp
= (struct hfsfid
*)fhp
;
2666 /* only the CNID is used to identify the file now */
2667 hfsfhp
->hfsfid_cnid
= htonl(cp
->c_fileid
);
2668 hfsfhp
->hfsfid_gen
= htonl(cp
->c_fileid
);
2669 *fhlenp
= sizeof(struct hfsfid
);
2676 * Initial HFS filesystems, done only once.
2679 hfs_init(__unused
struct vfsconf
*vfsp
)
2681 static int done
= 0;
2687 hfs_converterinit();
2692 hfs_lock_attr
= lck_attr_alloc_init();
2693 hfs_group_attr
= lck_grp_attr_alloc_init();
2694 hfs_mutex_group
= lck_grp_alloc_init("hfs-mutex", hfs_group_attr
);
2695 hfs_rwlock_group
= lck_grp_alloc_init("hfs-rwlock", hfs_group_attr
);
2696 hfs_spinlock_group
= lck_grp_alloc_init("hfs-spinlock", hfs_group_attr
);
2706 hfs_getmountpoint(struct vnode
*vp
, struct hfsmount
**hfsmpp
)
2708 struct hfsmount
* hfsmp
;
2709 char fstypename
[MFSNAMELEN
];
2714 if (!vnode_isvroot(vp
))
2717 vnode_vfsname(vp
, fstypename
);
2718 if (strncmp(fstypename
, "hfs", sizeof(fstypename
)) != 0)
2723 if (HFSTOVCB(hfsmp
)->vcbSigWord
== kHFSSigWord
)
2732 #include <sys/filedesc.h>
2735 * HFS filesystem related variables.
2738 hfs_sysctl(int *name
, __unused u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
2739 user_addr_t newp
, size_t newlen
, vfs_context_t context
)
2741 struct proc
*p
= vfs_context_proc(context
);
2743 struct hfsmount
*hfsmp
;
2745 /* all sysctl names at this level are terminal */
2747 if (name
[0] == HFS_ENCODINGBIAS
) {
2750 bias
= hfs_getencodingbias();
2751 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &bias
);
2752 if (error
== 0 && newp
)
2753 hfs_setencodingbias(bias
);
2756 } else if (name
[0] == HFS_EXTEND_FS
) {
2758 vnode_t vp
= vfs_context_cwd(context
);
2760 if (newp
== USER_ADDR_NULL
|| vp
== NULLVP
)
2762 if ((error
= hfs_getmountpoint(vp
, &hfsmp
)))
2764 error
= sysctl_quad(oldp
, oldlenp
, newp
, newlen
, (quad_t
*)&newsize
);
2768 error
= hfs_extendfs(hfsmp
, newsize
, context
);
2771 } else if (name
[0] == HFS_ENCODINGHINT
) {
2775 u_int16_t
*unicode_name
= NULL
;
2776 char *filename
= NULL
;
2778 if ((newlen
<= 0) || (newlen
> MAXPATHLEN
))
2781 bufsize
= MAX(newlen
* 3, MAXPATHLEN
);
2782 MALLOC(filename
, char *, newlen
, M_TEMP
, M_WAITOK
);
2783 if (filename
== NULL
) {
2785 goto encodinghint_exit
;
2787 MALLOC(unicode_name
, u_int16_t
*, bufsize
, M_TEMP
, M_WAITOK
);
2788 if (filename
== NULL
) {
2790 goto encodinghint_exit
;
2793 error
= copyin(newp
, (caddr_t
)filename
, newlen
);
2795 error
= utf8_decodestr((u_int8_t
*)filename
, newlen
- 1, unicode_name
,
2796 &bytes
, bufsize
, 0, UTF_DECOMPOSED
);
2798 hint
= hfs_pickencoding(unicode_name
, bytes
/ 2);
2799 error
= sysctl_int(oldp
, oldlenp
, USER_ADDR_NULL
, 0, (int32_t *)&hint
);
2805 FREE(unicode_name
, M_TEMP
);
2807 FREE(filename
, M_TEMP
);
2810 } else if (name
[0] == HFS_ENABLE_JOURNALING
) {
2811 // make the file system journaled...
2812 vnode_t vp
= vfs_context_cwd(context
);
2815 struct cat_attr jnl_attr
, jinfo_attr
;
2816 struct cat_fork jnl_fork
, jinfo_fork
;
2820 /* Only root can enable journaling */
2828 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
2831 if (HFSTOVCB(hfsmp
)->vcbSigWord
== kHFSSigWord
) {
2832 printf("hfs: can't make a plain hfs volume journaled.\n");
2837 printf("hfs: volume @ mp %p is already journaled!\n", vnode_mount(vp
));
2841 vcb
= HFSTOVCB(hfsmp
);
2842 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_EXTENTS
, HFS_EXCLUSIVE_LOCK
);
2843 if (BTHasContiguousNodes(VTOF(vcb
->catalogRefNum
)) == 0 ||
2844 BTHasContiguousNodes(VTOF(vcb
->extentsRefNum
)) == 0) {
2846 printf("hfs: volume has a btree w/non-contiguous nodes. can not enable journaling.\n");
2847 hfs_systemfile_unlock(hfsmp
, lockflags
);
2850 hfs_systemfile_unlock(hfsmp
, lockflags
);
2852 // make sure these both exist!
2853 if ( GetFileInfo(vcb
, kHFSRootFolderID
, ".journal_info_block", &jinfo_attr
, &jinfo_fork
) == 0
2854 || GetFileInfo(vcb
, kHFSRootFolderID
, ".journal", &jnl_attr
, &jnl_fork
) == 0) {
2859 hfs_sync(hfsmp
->hfs_mp
, MNT_WAIT
, context
);
2861 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2862 (off_t
)name
[2], (off_t
)name
[3]);
2865 // XXXdbg - note that currently (Sept, 08) hfs_util does not support
2866 // enabling the journal on a separate device so it is safe
2867 // to just copy hfs_devvp here. If hfs_util gets the ability
2868 // to dynamically enable the journal on a separate device then
2869 // we will have to do the same thing as hfs_early_journal_init()
2870 // to locate and open the journal device.
2872 jvp
= hfsmp
->hfs_devvp
;
2873 jnl
= journal_create(jvp
,
2874 (off_t
)name
[2] * (off_t
)HFSTOVCB(hfsmp
)->blockSize
2875 + HFSTOVCB(hfsmp
)->hfsPlusIOPosOffset
,
2876 (off_t
)((unsigned)name
[3]),
2878 hfsmp
->hfs_logical_block_size
,
2881 hfs_sync_metadata
, hfsmp
->hfs_mp
);
2884 * Set up the trim callback function so that we can add
2885 * recently freed extents to the free extent cache once
2886 * the transaction that freed them is written to the
2890 journal_trim_set_callback(jnl
, hfs_trim_callback
, hfsmp
);
2893 printf("hfs: FAILED to create the journal!\n");
2894 if (jvp
&& jvp
!= hfsmp
->hfs_devvp
) {
2895 vnode_clearmountedon(jvp
);
2896 VNOP_CLOSE(jvp
, hfsmp
->hfs_flags
& HFS_READ_ONLY
? FREAD
: FREAD
|FWRITE
, vfs_context_kernel());
2903 hfs_lock_global (hfsmp
, HFS_EXCLUSIVE_LOCK
);
2906 * Flush all dirty metadata buffers.
2908 buf_flushdirtyblks(hfsmp
->hfs_devvp
, TRUE
, 0, "hfs_sysctl");
2909 buf_flushdirtyblks(hfsmp
->hfs_extents_vp
, TRUE
, 0, "hfs_sysctl");
2910 buf_flushdirtyblks(hfsmp
->hfs_catalog_vp
, TRUE
, 0, "hfs_sysctl");
2911 buf_flushdirtyblks(hfsmp
->hfs_allocation_vp
, TRUE
, 0, "hfs_sysctl");
2912 if (hfsmp
->hfs_attribute_vp
)
2913 buf_flushdirtyblks(hfsmp
->hfs_attribute_vp
, TRUE
, 0, "hfs_sysctl");
2915 HFSTOVCB(hfsmp
)->vcbJinfoBlock
= name
[1];
2916 HFSTOVCB(hfsmp
)->vcbAtrb
|= kHFSVolumeJournaledMask
;
2920 // save this off for the hack-y check in hfs_remove()
2921 hfsmp
->jnl_start
= (u_int32_t
)name
[2];
2922 hfsmp
->jnl_size
= (off_t
)((unsigned)name
[3]);
2923 hfsmp
->hfs_jnlinfoblkid
= jinfo_attr
.ca_fileid
;
2924 hfsmp
->hfs_jnlfileid
= jnl_attr
.ca_fileid
;
2926 vfs_setflags(hfsmp
->hfs_mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
2928 hfs_unlock_global (hfsmp
);
2929 hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, 1);
2934 fsid
.val
[0] = (int32_t)hfsmp
->hfs_raw_dev
;
2935 fsid
.val
[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp
));
2936 vfs_event_signal(&fsid
, VQ_UPDATE
, (intptr_t)NULL
);
2939 } else if (name
[0] == HFS_DISABLE_JOURNALING
) {
2940 // clear the journaling bit
2941 vnode_t vp
= vfs_context_cwd(context
);
2943 /* Only root can disable journaling */
2953 * Disabling journaling is disallowed on volumes with directory hard links
2954 * because we have not tested the relevant code path.
2956 if (hfsmp
->hfs_private_attr
[DIR_HARDLINKS
].ca_entries
!= 0){
2957 printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
2961 printf("hfs: disabling journaling for mount @ %p\n", vnode_mount(vp
));
2963 hfs_lock_global (hfsmp
, HFS_EXCLUSIVE_LOCK
);
2965 // Lights out for you buddy!
2966 journal_close(hfsmp
->jnl
);
2969 if (hfsmp
->jvp
&& hfsmp
->jvp
!= hfsmp
->hfs_devvp
) {
2970 vnode_clearmountedon(hfsmp
->jvp
);
2971 VNOP_CLOSE(hfsmp
->jvp
, hfsmp
->hfs_flags
& HFS_READ_ONLY
? FREAD
: FREAD
|FWRITE
, vfs_context_kernel());
2972 vnode_put(hfsmp
->jvp
);
2975 vfs_clearflags(hfsmp
->hfs_mp
, (u_int64_t
)((unsigned int)MNT_JOURNALED
));
2976 hfsmp
->jnl_start
= 0;
2977 hfsmp
->hfs_jnlinfoblkid
= 0;
2978 hfsmp
->hfs_jnlfileid
= 0;
2980 HFSTOVCB(hfsmp
)->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2982 hfs_unlock_global (hfsmp
);
2984 hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, 1);
2989 fsid
.val
[0] = (int32_t)hfsmp
->hfs_raw_dev
;
2990 fsid
.val
[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp
));
2991 vfs_event_signal(&fsid
, VQ_UPDATE
, (intptr_t)NULL
);
2994 } else if (name
[0] == HFS_GET_JOURNAL_INFO
) {
2995 vnode_t vp
= vfs_context_cwd(context
);
2996 off_t jnl_start
, jnl_size
;
3001 /* 64-bit processes won't work with this sysctl -- can't fit a pointer into an int! */
3002 if (proc_is64bit(current_proc()))
3006 if (hfsmp
->jnl
== NULL
) {
3010 jnl_start
= (off_t
)(hfsmp
->jnl_start
* HFSTOVCB(hfsmp
)->blockSize
) + (off_t
)HFSTOVCB(hfsmp
)->hfsPlusIOPosOffset
;
3011 jnl_size
= (off_t
)hfsmp
->jnl_size
;
3014 if ((error
= copyout((caddr_t
)&jnl_start
, CAST_USER_ADDR_T(name
[1]), sizeof(off_t
))) != 0) {
3017 if ((error
= copyout((caddr_t
)&jnl_size
, CAST_USER_ADDR_T(name
[2]), sizeof(off_t
))) != 0) {
3022 } else if (name
[0] == HFS_SET_PKG_EXTENSIONS
) {
3024 return set_package_extensions_table((user_addr_t
)((unsigned)name
[1]), name
[2], name
[3]);
3026 } else if (name
[0] == VFS_CTL_QUERY
) {
3027 struct sysctl_req
*req
;
3028 union union_vfsidctl vc
;
3032 req
= CAST_DOWN(struct sysctl_req
*, oldp
); /* we're new style vfs sysctl. */
3034 error
= SYSCTL_IN(req
, &vc
, proc_is64bit(p
)? sizeof(vc
.vc64
):sizeof(vc
.vc32
));
3035 if (error
) return (error
);
3037 mp
= vfs_getvfs(&vc
.vc32
.vc_fsid
); /* works for 32 and 64 */
3038 if (mp
== NULL
) return (ENOENT
);
3040 hfsmp
= VFSTOHFS(mp
);
3041 bzero(&vq
, sizeof(vq
));
3042 vq
.vq_flags
= hfsmp
->hfs_notification_conditions
;
3043 return SYSCTL_OUT(req
, &vq
, sizeof(vq
));;
3044 } else if (name
[0] == HFS_REPLAY_JOURNAL
) {
3045 vnode_t devvp
= NULL
;
3050 device_fd
= name
[1];
3051 error
= file_vnode(device_fd
, &devvp
);
3055 error
= vnode_getwithref(devvp
);
3057 file_drop(device_fd
);
3060 error
= hfs_journal_replay(devvp
, context
);
3061 file_drop(device_fd
);
3064 } else if (name
[0] == HFS_ENABLE_RESIZE_DEBUG
) {
3065 hfs_resize_debug
= 1;
3066 printf ("hfs_sysctl: Enabled volume resize debugging.\n");
3074 * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support
3075 * the build_path ioctl. We use it to leverage the code below that updates
3076 * the origin list cache if necessary
3080 hfs_vfs_vget(struct mount
*mp
, ino64_t ino
, struct vnode
**vpp
, __unused vfs_context_t context
)
3084 struct hfsmount
*hfsmp
;
3086 hfsmp
= VFSTOHFS(mp
);
3088 error
= hfs_vget(hfsmp
, (cnid_t
)ino
, vpp
, 1, 0);
3093 * ADLs may need to have their origin state updated
3094 * since build_path needs a valid parent. The same is true
3095 * for hardlinked files as well. There isn't a race window here
3096 * in re-acquiring the cnode lock since we aren't pulling any data
3097 * out of the cnode; instead, we're going to the catalog.
3099 if ((VTOC(*vpp
)->c_flag
& C_HARDLINK
) &&
3100 (hfs_lock(VTOC(*vpp
), HFS_EXCLUSIVE_LOCK
) == 0)) {
3101 cnode_t
*cp
= VTOC(*vpp
);
3102 struct cat_desc cdesc
;
3104 if (!hfs_haslinkorigin(cp
)) {
3105 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
3106 error
= cat_findname(hfsmp
, (cnid_t
)ino
, &cdesc
);
3107 hfs_systemfile_unlock(hfsmp
, lockflags
);
3109 if ((cdesc
.cd_parentcnid
!= hfsmp
->hfs_private_desc
[DIR_HARDLINKS
].cd_cnid
) &&
3110 (cdesc
.cd_parentcnid
!= hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
)) {
3111 hfs_savelinkorigin(cp
, cdesc
.cd_parentcnid
);
3113 cat_releasedesc(&cdesc
);
3123 * Look up an HFS object by ID.
3125 * The object is returned with an iocount reference and the cnode locked.
3127 * If the object is a file then it will represent the data fork.
3130 hfs_vget(struct hfsmount
*hfsmp
, cnid_t cnid
, struct vnode
**vpp
, int skiplock
, int allow_deleted
)
3132 struct vnode
*vp
= NULLVP
;
3133 struct cat_desc cndesc
;
3134 struct cat_attr cnattr
;
3135 struct cat_fork cnfork
;
3136 u_int32_t linkref
= 0;
3139 /* Check for cnids that should't be exported. */
3140 if ((cnid
< kHFSFirstUserCatalogNodeID
) &&
3141 (cnid
!= kHFSRootFolderID
&& cnid
!= kHFSRootParentID
)) {
3144 /* Don't export our private directories. */
3145 if (cnid
== hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
||
3146 cnid
== hfsmp
->hfs_private_desc
[DIR_HARDLINKS
].cd_cnid
) {
3150 * Check the hash first
3152 vp
= hfs_chash_getvnode(hfsmp
, cnid
, 0, skiplock
, allow_deleted
);
3158 bzero(&cndesc
, sizeof(cndesc
));
3159 bzero(&cnattr
, sizeof(cnattr
));
3160 bzero(&cnfork
, sizeof(cnfork
));
3163 * Not in hash, lookup in catalog
3165 if (cnid
== kHFSRootParentID
) {
3166 static char hfs_rootname
[] = "/";
3168 cndesc
.cd_nameptr
= (const u_int8_t
*)&hfs_rootname
[0];
3169 cndesc
.cd_namelen
= 1;
3170 cndesc
.cd_parentcnid
= kHFSRootParentID
;
3171 cndesc
.cd_cnid
= kHFSRootFolderID
;
3172 cndesc
.cd_flags
= CD_ISDIR
;
3174 cnattr
.ca_fileid
= kHFSRootFolderID
;
3175 cnattr
.ca_linkcount
= 1;
3176 cnattr
.ca_entries
= 1;
3177 cnattr
.ca_dircount
= 1;
3178 cnattr
.ca_mode
= (S_IFDIR
| S_IRWXU
| S_IRWXG
| S_IRWXO
);
3182 const char *nameptr
;
3184 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
3185 error
= cat_idlookup(hfsmp
, cnid
, 0, &cndesc
, &cnattr
, &cnfork
);
3186 hfs_systemfile_unlock(hfsmp
, lockflags
);
3194 * Check for a raw hardlink inode and save its linkref.
3196 pid
= cndesc
.cd_parentcnid
;
3197 nameptr
= (const char *)cndesc
.cd_nameptr
;
3199 if ((pid
== hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
) &&
3200 (bcmp(nameptr
, HFS_INODE_PREFIX
, HFS_INODE_PREFIX_LEN
) == 0)) {
3201 linkref
= strtoul(&nameptr
[HFS_INODE_PREFIX_LEN
], NULL
, 10);
3203 } else if ((pid
== hfsmp
->hfs_private_desc
[DIR_HARDLINKS
].cd_cnid
) &&
3204 (bcmp(nameptr
, HFS_DIRINODE_PREFIX
, HFS_DIRINODE_PREFIX_LEN
) == 0)) {
3205 linkref
= strtoul(&nameptr
[HFS_DIRINODE_PREFIX_LEN
], NULL
, 10);
3207 } else if ((pid
== hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
) &&
3208 (bcmp(nameptr
, HFS_DELETE_PREFIX
, HFS_DELETE_PREFIX_LEN
) == 0)) {
3210 cat_releasedesc(&cndesc
);
3211 return (ENOENT
); /* open unlinked file */
3216 * Finish initializing cnode descriptor for hardlinks.
3218 * We need a valid name and parent for reverse lookups.
3223 struct cat_desc linkdesc
;
3226 cnattr
.ca_linkref
= linkref
;
3229 * Pick up the first link in the chain and get a descriptor for it.
3230 * This allows blind volfs paths to work for hardlinks.
3232 if ((hfs_lookup_siblinglinks(hfsmp
, linkref
, &prevlinkid
, &nextlinkid
) == 0) &&
3233 (nextlinkid
!= 0)) {
3234 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
3235 error
= cat_findname(hfsmp
, nextlinkid
, &linkdesc
);
3236 hfs_systemfile_unlock(hfsmp
, lockflags
);
3238 cat_releasedesc(&cndesc
);
3239 bcopy(&linkdesc
, &cndesc
, sizeof(linkdesc
));
3245 int newvnode_flags
= 0;
3247 error
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
,
3248 &cnfork
, &vp
, &newvnode_flags
);
3250 VTOC(vp
)->c_flag
|= C_HARDLINK
;
3251 vnode_setmultipath(vp
);
3254 struct componentname cn
;
3255 int newvnode_flags
= 0;
3257 /* Supply hfs_getnewvnode with a component name. */
3258 MALLOC_ZONE(cn
.cn_pnbuf
, caddr_t
, MAXPATHLEN
, M_NAMEI
, M_WAITOK
);
3259 cn
.cn_nameiop
= LOOKUP
;
3260 cn
.cn_flags
= ISLASTCN
| HASBUF
;
3261 cn
.cn_context
= NULL
;
3262 cn
.cn_pnlen
= MAXPATHLEN
;
3263 cn
.cn_nameptr
= cn
.cn_pnbuf
;
3264 cn
.cn_namelen
= cndesc
.cd_namelen
;
3267 bcopy(cndesc
.cd_nameptr
, cn
.cn_nameptr
, cndesc
.cd_namelen
+ 1);
3269 error
= hfs_getnewvnode(hfsmp
, NULLVP
, &cn
, &cndesc
, 0, &cnattr
,
3270 &cnfork
, &vp
, &newvnode_flags
);
3272 if (error
== 0 && (VTOC(vp
)->c_flag
& C_HARDLINK
)) {
3273 hfs_savelinkorigin(VTOC(vp
), cndesc
.cd_parentcnid
);
3275 FREE_ZONE(cn
.cn_pnbuf
, cn
.cn_pnlen
, M_NAMEI
);
3277 cat_releasedesc(&cndesc
);
3280 if (vp
&& skiplock
) {
3281 hfs_unlock(VTOC(vp
));
3288 * Flush out all the files in a filesystem.
3292 hfs_flushfiles(struct mount
*mp
, int flags
, struct proc
*p
)
3294 hfs_flushfiles(struct mount
*mp
, int flags
, __unused
struct proc
*p
)
3297 struct hfsmount
*hfsmp
;
3298 struct vnode
*skipvp
= NULLVP
;
3305 hfsmp
= VFSTOHFS(mp
);
3309 * The open quota files have an indirect reference on
3310 * the root directory vnode. We must account for this
3311 * extra reference when doing the intial vflush.
3314 if (((unsigned int)vfs_flags(mp
)) & MNT_QUOTA
) {
3316 /* Find out how many quota files we have open. */
3317 for (i
= 0; i
< MAXQUOTAS
; i
++) {
3318 if (hfsmp
->hfs_qfiles
[i
].qf_vp
!= NULLVP
)
3322 /* Obtain the root vnode so we can skip over it. */
3323 skipvp
= hfs_chash_getvnode(hfsmp
, kHFSRootFolderID
, 0, 0, 0);
3327 error
= vflush(mp
, skipvp
, SKIPSYSTEM
| SKIPSWAP
| flags
);
3331 error
= vflush(mp
, skipvp
, SKIPSYSTEM
| flags
);
3334 if (((unsigned int)vfs_flags(mp
)) & MNT_QUOTA
) {
3337 * See if there are additional references on the
3338 * root vp besides the ones obtained from the open
3339 * quota files and the hfs_chash_getvnode call above.
3342 (vnode_isinuse(skipvp
, quotafilecnt
))) {
3343 error
= EBUSY
; /* root directory is still open */
3345 hfs_unlock(VTOC(skipvp
));
3348 if (error
&& (flags
& FORCECLOSE
) == 0)
3351 for (i
= 0; i
< MAXQUOTAS
; i
++) {
3352 if (hfsmp
->hfs_qfiles
[i
].qf_vp
== NULLVP
)
3354 hfs_quotaoff(p
, mp
, i
);
3356 error
= vflush(mp
, NULLVP
, SKIPSYSTEM
| flags
);
3364 * Update volume encoding bitmap (HFS Plus only)
3368 hfs_setencodingbits(struct hfsmount
*hfsmp
, u_int32_t encoding
)
3370 #define kIndexMacUkrainian 48 /* MacUkrainian encoding is 152 */
3371 #define kIndexMacFarsi 49 /* MacFarsi encoding is 140 */
3376 case kTextEncodingMacUkrainian
:
3377 index
= kIndexMacUkrainian
;
3379 case kTextEncodingMacFarsi
:
3380 index
= kIndexMacFarsi
;
3387 if (index
< 64 && (hfsmp
->encodingsBitmap
& (u_int64_t
)(1ULL << index
)) == 0) {
3388 HFS_MOUNT_LOCK(hfsmp
, TRUE
)
3389 hfsmp
->encodingsBitmap
|= (u_int64_t
)(1ULL << index
);
3390 MarkVCBDirty(hfsmp
);
3391 HFS_MOUNT_UNLOCK(hfsmp
, TRUE
);
3396 * Update volume stats
3398 * On journal volumes this will cause a volume header flush
3401 hfs_volupdate(struct hfsmount
*hfsmp
, enum volop op
, int inroot
)
3407 lck_mtx_lock(&hfsmp
->hfs_mutex
);
3409 MarkVCBDirty(hfsmp
);
3410 hfsmp
->hfs_mtime
= tv
.tv_sec
;
3416 if (hfsmp
->hfs_dircount
!= 0xFFFFFFFF)
3417 ++hfsmp
->hfs_dircount
;
3418 if (inroot
&& hfsmp
->vcbNmRtDirs
!= 0xFFFF)
3419 ++hfsmp
->vcbNmRtDirs
;
3422 if (hfsmp
->hfs_dircount
!= 0)
3423 --hfsmp
->hfs_dircount
;
3424 if (inroot
&& hfsmp
->vcbNmRtDirs
!= 0xFFFF)
3425 --hfsmp
->vcbNmRtDirs
;
3428 if (hfsmp
->hfs_filecount
!= 0xFFFFFFFF)
3429 ++hfsmp
->hfs_filecount
;
3430 if (inroot
&& hfsmp
->vcbNmFls
!= 0xFFFF)
3434 if (hfsmp
->hfs_filecount
!= 0)
3435 --hfsmp
->hfs_filecount
;
3436 if (inroot
&& hfsmp
->vcbNmFls
!= 0xFFFF)
3441 lck_mtx_unlock(&hfsmp
->hfs_mutex
);
3444 hfs_flushvolumeheader(hfsmp
, 0, 0);
3452 hfs_flushMDB(struct hfsmount
*hfsmp
, int waitfor
, int altflush
)
3454 ExtendedVCB
*vcb
= HFSTOVCB(hfsmp
);
3455 struct filefork
*fp
;
3456 HFSMasterDirectoryBlock
*mdb
;
3457 struct buf
*bp
= NULL
;
3462 sectorsize
= hfsmp
->hfs_logical_block_size
;
3463 retval
= (int)buf_bread(hfsmp
->hfs_devvp
, (daddr64_t
)HFS_PRI_SECTOR(sectorsize
), sectorsize
, NOCRED
, &bp
);
3470 lck_mtx_lock(&hfsmp
->hfs_mutex
);
3472 mdb
= (HFSMasterDirectoryBlock
*)(buf_dataptr(bp
) + HFS_PRI_OFFSET(sectorsize
));
3474 mdb
->drCrDate
= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb
->hfs_itime
)));
3475 mdb
->drLsMod
= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb
->vcbLsMod
)));
3476 mdb
->drAtrb
= SWAP_BE16 (vcb
->vcbAtrb
);
3477 mdb
->drNmFls
= SWAP_BE16 (vcb
->vcbNmFls
);
3478 mdb
->drAllocPtr
= SWAP_BE16 (vcb
->nextAllocation
);
3479 mdb
->drClpSiz
= SWAP_BE32 (vcb
->vcbClpSiz
);
3480 mdb
->drNxtCNID
= SWAP_BE32 (vcb
->vcbNxtCNID
);
3481 mdb
->drFreeBks
= SWAP_BE16 (vcb
->freeBlocks
);
3483 namelen
= strlen((char *)vcb
->vcbVN
);
3484 retval
= utf8_to_hfs(vcb
, namelen
, vcb
->vcbVN
, mdb
->drVN
);
3485 /* Retry with MacRoman in case that's how it was exported. */
3487 retval
= utf8_to_mac_roman(namelen
, vcb
->vcbVN
, mdb
->drVN
);
3489 mdb
->drVolBkUp
= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb
->vcbVolBkUp
)));
3490 mdb
->drWrCnt
= SWAP_BE32 (vcb
->vcbWrCnt
);
3491 mdb
->drNmRtDirs
= SWAP_BE16 (vcb
->vcbNmRtDirs
);
3492 mdb
->drFilCnt
= SWAP_BE32 (vcb
->vcbFilCnt
);
3493 mdb
->drDirCnt
= SWAP_BE32 (vcb
->vcbDirCnt
);
3495 bcopy(vcb
->vcbFndrInfo
, mdb
->drFndrInfo
, sizeof(mdb
->drFndrInfo
));
3497 fp
= VTOF(vcb
->extentsRefNum
);
3498 mdb
->drXTExtRec
[0].startBlock
= SWAP_BE16 (fp
->ff_extents
[0].startBlock
);
3499 mdb
->drXTExtRec
[0].blockCount
= SWAP_BE16 (fp
->ff_extents
[0].blockCount
);
3500 mdb
->drXTExtRec
[1].startBlock
= SWAP_BE16 (fp
->ff_extents
[1].startBlock
);
3501 mdb
->drXTExtRec
[1].blockCount
= SWAP_BE16 (fp
->ff_extents
[1].blockCount
);
3502 mdb
->drXTExtRec
[2].startBlock
= SWAP_BE16 (fp
->ff_extents
[2].startBlock
);
3503 mdb
->drXTExtRec
[2].blockCount
= SWAP_BE16 (fp
->ff_extents
[2].blockCount
);
3504 mdb
->drXTFlSize
= SWAP_BE32 (fp
->ff_blocks
* vcb
->blockSize
);
3505 mdb
->drXTClpSiz
= SWAP_BE32 (fp
->ff_clumpsize
);
3506 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3508 fp
= VTOF(vcb
->catalogRefNum
);
3509 mdb
->drCTExtRec
[0].startBlock
= SWAP_BE16 (fp
->ff_extents
[0].startBlock
);
3510 mdb
->drCTExtRec
[0].blockCount
= SWAP_BE16 (fp
->ff_extents
[0].blockCount
);
3511 mdb
->drCTExtRec
[1].startBlock
= SWAP_BE16 (fp
->ff_extents
[1].startBlock
);
3512 mdb
->drCTExtRec
[1].blockCount
= SWAP_BE16 (fp
->ff_extents
[1].blockCount
);
3513 mdb
->drCTExtRec
[2].startBlock
= SWAP_BE16 (fp
->ff_extents
[2].startBlock
);
3514 mdb
->drCTExtRec
[2].blockCount
= SWAP_BE16 (fp
->ff_extents
[2].blockCount
);
3515 mdb
->drCTFlSize
= SWAP_BE32 (fp
->ff_blocks
* vcb
->blockSize
);
3516 mdb
->drCTClpSiz
= SWAP_BE32 (fp
->ff_clumpsize
);
3517 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3519 MarkVCBClean( vcb
);
3521 lck_mtx_unlock(&hfsmp
->hfs_mutex
);
3523 /* If requested, flush out the alternate MDB */
3525 struct buf
*alt_bp
= NULL
;
3527 if (buf_meta_bread(hfsmp
->hfs_devvp
, hfsmp
->hfs_alt_id_sector
, sectorsize
, NOCRED
, &alt_bp
) == 0) {
3528 bcopy(mdb
, (char *)buf_dataptr(alt_bp
) + HFS_ALT_OFFSET(sectorsize
), kMDBSize
);
3530 (void) VNOP_BWRITE(alt_bp
);
3535 if (waitfor
!= MNT_WAIT
)
3538 retval
= VNOP_BWRITE(bp
);
3544 * Flush any dirty in-memory mount data to the on-disk
3547 * Note: the on-disk volume signature is intentionally
3548 * not flushed since the on-disk "H+" and "HX" signatures
3549 * are always stored in-memory as "H+".
3552 hfs_flushvolumeheader(struct hfsmount
*hfsmp
, int waitfor
, int altflush
)
3554 ExtendedVCB
*vcb
= HFSTOVCB(hfsmp
);
3555 struct filefork
*fp
;
3556 HFSPlusVolumeHeader
*volumeHeader
, *altVH
;
3558 struct buf
*bp
, *alt_bp
;
3560 daddr64_t priIDSector
;
3562 u_int16_t signature
;
3563 u_int16_t hfsversion
;
3565 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
3568 if (hfsmp
->hfs_flags
& HFS_STANDARD
) {
3569 return hfs_flushMDB(hfsmp
, waitfor
, altflush
);
3571 critical
= altflush
;
3572 priIDSector
= (daddr64_t
)((vcb
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) +
3573 HFS_PRI_SECTOR(hfsmp
->hfs_logical_block_size
));
3575 if (hfs_start_transaction(hfsmp
) != 0) {
3582 retval
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
3583 HFS_PHYSBLK_ROUNDDOWN(priIDSector
, hfsmp
->hfs_log_per_phys
),
3584 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp
);
3586 printf("hfs: err %d reading VH blk (%s)\n", retval
, vcb
->vcbVN
);
3590 volumeHeader
= (HFSPlusVolumeHeader
*)((char *)buf_dataptr(bp
) +
3591 HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
));
3594 * Sanity check what we just read. If it's bad, try the alternate
3597 signature
= SWAP_BE16 (volumeHeader
->signature
);
3598 hfsversion
= SWAP_BE16 (volumeHeader
->version
);
3599 if ((signature
!= kHFSPlusSigWord
&& signature
!= kHFSXSigWord
) ||
3600 (hfsversion
< kHFSPlusVersion
) || (hfsversion
> 100) ||
3601 (SWAP_BE32 (volumeHeader
->blockSize
) != vcb
->blockSize
)) {
3602 printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d%s\n",
3603 vcb
->vcbVN
, signature
, hfsversion
,
3604 SWAP_BE32 (volumeHeader
->blockSize
),
3605 hfsmp
->hfs_alt_id_sector
? "; trying alternate" : "");
3606 hfs_mark_volume_inconsistent(hfsmp
);
3608 if (hfsmp
->hfs_alt_id_sector
) {
3609 retval
= buf_meta_bread(hfsmp
->hfs_devvp
,
3610 HFS_PHYSBLK_ROUNDDOWN(hfsmp
->hfs_alt_id_sector
, hfsmp
->hfs_log_per_phys
),
3611 hfsmp
->hfs_physical_block_size
, NOCRED
, &alt_bp
);
3613 printf("hfs: err %d reading alternate VH (%s)\n", retval
, vcb
->vcbVN
);
3617 altVH
= (HFSPlusVolumeHeader
*)((char *)buf_dataptr(alt_bp
) +
3618 HFS_ALT_OFFSET(hfsmp
->hfs_physical_block_size
));
3619 signature
= SWAP_BE16(altVH
->signature
);
3620 hfsversion
= SWAP_BE16(altVH
->version
);
3622 if ((signature
!= kHFSPlusSigWord
&& signature
!= kHFSXSigWord
) ||
3623 (hfsversion
< kHFSPlusVersion
) || (kHFSPlusVersion
> 100) ||
3624 (SWAP_BE32(altVH
->blockSize
) != vcb
->blockSize
)) {
3625 printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3626 vcb
->vcbVN
, signature
, hfsversion
,
3627 SWAP_BE32(altVH
->blockSize
));
3632 /* The alternate is plausible, so use it. */
3633 bcopy(altVH
, volumeHeader
, kMDBSize
);
3637 /* No alternate VH, nothing more we can do. */
3644 journal_modify_block_start(hfsmp
->jnl
, bp
);
3648 * For embedded HFS+ volumes, update create date if it changed
3649 * (ie from a setattrlist call)
3651 if ((vcb
->hfsPlusIOPosOffset
!= 0) &&
3652 (SWAP_BE32 (volumeHeader
->createDate
) != vcb
->localCreateDate
)) {
3654 HFSMasterDirectoryBlock
*mdb
;
3656 retval
= (int)buf_meta_bread(hfsmp
->hfs_devvp
,
3657 HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp
->hfs_logical_block_size
), hfsmp
->hfs_log_per_phys
),
3658 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp2
);
3664 mdb
= (HFSMasterDirectoryBlock
*)(buf_dataptr(bp2
) +
3665 HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
));
3667 if ( SWAP_BE32 (mdb
->drCrDate
) != vcb
->localCreateDate
)
3670 journal_modify_block_start(hfsmp
->jnl
, bp2
);
3673 mdb
->drCrDate
= SWAP_BE32 (vcb
->localCreateDate
); /* pick up the new create date */
3676 journal_modify_block_end(hfsmp
->jnl
, bp2
, NULL
, NULL
);
3678 (void) VNOP_BWRITE(bp2
); /* write out the changes */
3683 buf_brelse(bp2
); /* just release it */
3688 lck_mtx_lock(&hfsmp
->hfs_mutex
);
3690 /* Note: only update the lower 16 bits worth of attributes */
3691 volumeHeader
->attributes
= SWAP_BE32 (vcb
->vcbAtrb
);
3692 volumeHeader
->journalInfoBlock
= SWAP_BE32 (vcb
->vcbJinfoBlock
);
3694 volumeHeader
->lastMountedVersion
= SWAP_BE32 (kHFSJMountVersion
);
3696 volumeHeader
->lastMountedVersion
= SWAP_BE32 (kHFSPlusMountVersion
);
3698 volumeHeader
->createDate
= SWAP_BE32 (vcb
->localCreateDate
); /* volume create date is in local time */
3699 volumeHeader
->modifyDate
= SWAP_BE32 (to_hfs_time(vcb
->vcbLsMod
));
3700 volumeHeader
->backupDate
= SWAP_BE32 (to_hfs_time(vcb
->vcbVolBkUp
));
3701 volumeHeader
->fileCount
= SWAP_BE32 (vcb
->vcbFilCnt
);
3702 volumeHeader
->folderCount
= SWAP_BE32 (vcb
->vcbDirCnt
);
3703 volumeHeader
->totalBlocks
= SWAP_BE32 (vcb
->totalBlocks
);
3704 volumeHeader
->freeBlocks
= SWAP_BE32 (vcb
->freeBlocks
);
3705 volumeHeader
->nextAllocation
= SWAP_BE32 (vcb
->nextAllocation
);
3706 volumeHeader
->rsrcClumpSize
= SWAP_BE32 (vcb
->vcbClpSiz
);
3707 volumeHeader
->dataClumpSize
= SWAP_BE32 (vcb
->vcbClpSiz
);
3708 volumeHeader
->nextCatalogID
= SWAP_BE32 (vcb
->vcbNxtCNID
);
3709 volumeHeader
->writeCount
= SWAP_BE32 (vcb
->vcbWrCnt
);
3710 volumeHeader
->encodingsBitmap
= SWAP_BE64 (vcb
->encodingsBitmap
);
3712 if (bcmp(vcb
->vcbFndrInfo
, volumeHeader
->finderInfo
, sizeof(volumeHeader
->finderInfo
)) != 0) {
3713 bcopy(vcb
->vcbFndrInfo
, volumeHeader
->finderInfo
, sizeof(volumeHeader
->finderInfo
));
3718 * System files are only dirty when altflush is set.
3720 if (altflush
== 0) {
3724 /* Sync Extents over-flow file meta data */
3725 fp
= VTOF(vcb
->extentsRefNum
);
3726 if (FTOC(fp
)->c_flag
& C_MODIFIED
) {
3727 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
3728 volumeHeader
->extentsFile
.extents
[i
].startBlock
=
3729 SWAP_BE32 (fp
->ff_extents
[i
].startBlock
);
3730 volumeHeader
->extentsFile
.extents
[i
].blockCount
=
3731 SWAP_BE32 (fp
->ff_extents
[i
].blockCount
);
3733 volumeHeader
->extentsFile
.logicalSize
= SWAP_BE64 (fp
->ff_size
);
3734 volumeHeader
->extentsFile
.totalBlocks
= SWAP_BE32 (fp
->ff_blocks
);
3735 volumeHeader
->extentsFile
.clumpSize
= SWAP_BE32 (fp
->ff_clumpsize
);
3736 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3739 /* Sync Catalog file meta data */
3740 fp
= VTOF(vcb
->catalogRefNum
);
3741 if (FTOC(fp
)->c_flag
& C_MODIFIED
) {
3742 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
3743 volumeHeader
->catalogFile
.extents
[i
].startBlock
=
3744 SWAP_BE32 (fp
->ff_extents
[i
].startBlock
);
3745 volumeHeader
->catalogFile
.extents
[i
].blockCount
=
3746 SWAP_BE32 (fp
->ff_extents
[i
].blockCount
);
3748 volumeHeader
->catalogFile
.logicalSize
= SWAP_BE64 (fp
->ff_size
);
3749 volumeHeader
->catalogFile
.totalBlocks
= SWAP_BE32 (fp
->ff_blocks
);
3750 volumeHeader
->catalogFile
.clumpSize
= SWAP_BE32 (fp
->ff_clumpsize
);
3751 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3754 /* Sync Allocation file meta data */
3755 fp
= VTOF(vcb
->allocationsRefNum
);
3756 if (FTOC(fp
)->c_flag
& C_MODIFIED
) {
3757 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
3758 volumeHeader
->allocationFile
.extents
[i
].startBlock
=
3759 SWAP_BE32 (fp
->ff_extents
[i
].startBlock
);
3760 volumeHeader
->allocationFile
.extents
[i
].blockCount
=
3761 SWAP_BE32 (fp
->ff_extents
[i
].blockCount
);
3763 volumeHeader
->allocationFile
.logicalSize
= SWAP_BE64 (fp
->ff_size
);
3764 volumeHeader
->allocationFile
.totalBlocks
= SWAP_BE32 (fp
->ff_blocks
);
3765 volumeHeader
->allocationFile
.clumpSize
= SWAP_BE32 (fp
->ff_clumpsize
);
3766 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3769 /* Sync Attribute file meta data */
3770 if (hfsmp
->hfs_attribute_vp
) {
3771 fp
= VTOF(hfsmp
->hfs_attribute_vp
);
3772 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
3773 volumeHeader
->attributesFile
.extents
[i
].startBlock
=
3774 SWAP_BE32 (fp
->ff_extents
[i
].startBlock
);
3775 volumeHeader
->attributesFile
.extents
[i
].blockCount
=
3776 SWAP_BE32 (fp
->ff_extents
[i
].blockCount
);
3778 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3779 volumeHeader
->attributesFile
.logicalSize
= SWAP_BE64 (fp
->ff_size
);
3780 volumeHeader
->attributesFile
.totalBlocks
= SWAP_BE32 (fp
->ff_blocks
);
3781 volumeHeader
->attributesFile
.clumpSize
= SWAP_BE32 (fp
->ff_clumpsize
);
3784 /* Sync Startup file meta data */
3785 if (hfsmp
->hfs_startup_vp
) {
3786 fp
= VTOF(hfsmp
->hfs_startup_vp
);
3787 if (FTOC(fp
)->c_flag
& C_MODIFIED
) {
3788 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
3789 volumeHeader
->startupFile
.extents
[i
].startBlock
=
3790 SWAP_BE32 (fp
->ff_extents
[i
].startBlock
);
3791 volumeHeader
->startupFile
.extents
[i
].blockCount
=
3792 SWAP_BE32 (fp
->ff_extents
[i
].blockCount
);
3794 volumeHeader
->startupFile
.logicalSize
= SWAP_BE64 (fp
->ff_size
);
3795 volumeHeader
->startupFile
.totalBlocks
= SWAP_BE32 (fp
->ff_blocks
);
3796 volumeHeader
->startupFile
.clumpSize
= SWAP_BE32 (fp
->ff_clumpsize
);
3797 FTOC(fp
)->c_flag
&= ~C_MODIFIED
;
3802 MarkVCBClean(hfsmp
);
3803 lck_mtx_unlock(&hfsmp
->hfs_mutex
);
3805 /* If requested, flush out the alternate volume header */
3806 if (altflush
&& hfsmp
->hfs_alt_id_sector
) {
3807 if (buf_meta_bread(hfsmp
->hfs_devvp
,
3808 HFS_PHYSBLK_ROUNDDOWN(hfsmp
->hfs_alt_id_sector
, hfsmp
->hfs_log_per_phys
),
3809 hfsmp
->hfs_physical_block_size
, NOCRED
, &alt_bp
) == 0) {
3811 journal_modify_block_start(hfsmp
->jnl
, alt_bp
);
3814 bcopy(volumeHeader
, (char *)buf_dataptr(alt_bp
) +
3815 HFS_ALT_OFFSET(hfsmp
->hfs_physical_block_size
),
3819 journal_modify_block_end(hfsmp
->jnl
, alt_bp
, NULL
, NULL
);
3821 (void) VNOP_BWRITE(alt_bp
);
3828 journal_modify_block_end(hfsmp
->jnl
, bp
, NULL
, NULL
);
3830 if (waitfor
!= MNT_WAIT
)
3833 retval
= VNOP_BWRITE(bp
);
3834 /* When critical data changes, flush the device cache */
3835 if (critical
&& (retval
== 0)) {
3836 (void) VNOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCSYNCHRONIZECACHE
,
3837 NULL
, FWRITE
, NULL
);
3841 hfs_end_transaction(hfsmp
);
3850 hfs_end_transaction(hfsmp
);
3856 * Extend a file system.
3859 hfs_extendfs(struct hfsmount
*hfsmp
, u_int64_t newsize
, vfs_context_t context
)
3861 struct proc
*p
= vfs_context_proc(context
);
3862 kauth_cred_t cred
= vfs_context_ucred(context
);
3864 struct vnode
*devvp
;
3866 struct filefork
*fp
= NULL
;
3868 struct cat_fork forkdata
;
3870 u_int64_t newblkcnt
;
3871 u_int64_t prev_phys_block_count
;
3873 u_int64_t sectorcnt
;
3874 u_int32_t sectorsize
;
3875 u_int32_t phys_sectorsize
;
3876 daddr64_t prev_alt_sector
;
3880 int64_t oldBitmapSize
;
3881 Boolean usedExtendFileC
= false;
3882 int transaction_begun
= 0;
3884 devvp
= hfsmp
->hfs_devvp
;
3885 vcb
= HFSTOVCB(hfsmp
);
3888 * - HFS Plus file systems only.
3889 * - Journaling must be enabled.
3890 * - No embedded volumes.
3892 if ((vcb
->vcbSigWord
== kHFSSigWord
) ||
3893 (hfsmp
->jnl
== NULL
) ||
3894 (vcb
->hfsPlusIOPosOffset
!= 0)) {
3898 * If extending file system by non-root, then verify
3899 * ownership and check permissions.
3901 if (suser(cred
, NULL
)) {
3902 error
= hfs_vget(hfsmp
, kHFSRootFolderID
, &vp
, 0, 0);
3906 error
= hfs_owner_rights(hfsmp
, VTOC(vp
)->c_uid
, cred
, p
, 0);
3908 error
= hfs_write_access(vp
, cred
, p
, false);
3910 hfs_unlock(VTOC(vp
));
3915 error
= vnode_authorize(devvp
, NULL
, KAUTH_VNODE_READ_DATA
| KAUTH_VNODE_WRITE_DATA
, context
);
3919 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKSIZE
, (caddr_t
)§orsize
, 0, context
)) {
3922 if (sectorsize
!= hfsmp
->hfs_logical_block_size
) {
3925 if (VNOP_IOCTL(devvp
, DKIOCGETBLOCKCOUNT
, (caddr_t
)§orcnt
, 0, context
)) {
3928 if ((sectorsize
* sectorcnt
) < newsize
) {
3929 printf("hfs_extendfs: not enough space on device\n");
3932 error
= VNOP_IOCTL(devvp
, DKIOCGETPHYSICALBLOCKSIZE
, (caddr_t
)&phys_sectorsize
, 0, context
);
3934 if ((error
!= ENOTSUP
) && (error
!= ENOTTY
)) {
3937 /* If ioctl is not supported, force physical and logical sector size to be same */
3938 phys_sectorsize
= sectorsize
;
3940 oldsize
= (u_int64_t
)hfsmp
->totalBlocks
* (u_int64_t
)hfsmp
->blockSize
;
3943 * Validate new size.
3945 if ((newsize
<= oldsize
) || (newsize
% sectorsize
) || (newsize
% phys_sectorsize
)) {
3946 printf("hfs_extendfs: invalid size\n");
3949 newblkcnt
= newsize
/ vcb
->blockSize
;
3950 if (newblkcnt
> (u_int64_t
)0xFFFFFFFF)
3953 addblks
= newblkcnt
- vcb
->totalBlocks
;
3955 if (hfs_resize_debug
) {
3956 printf ("hfs_extendfs: old: size=%qu, blkcnt=%u\n", oldsize
, hfsmp
->totalBlocks
);
3957 printf ("hfs_extendfs: new: size=%qu, blkcnt=%u, addblks=%u\n", newsize
, (u_int32_t
)newblkcnt
, addblks
);
3959 printf("hfs_extendfs: will extend \"%s\" by %d blocks\n", vcb
->vcbVN
, addblks
);
3961 HFS_MOUNT_LOCK(hfsmp
, TRUE
);
3962 if (hfsmp
->hfs_flags
& HFS_RESIZE_IN_PROGRESS
) {
3963 HFS_MOUNT_UNLOCK(hfsmp
, TRUE
);
3967 hfsmp
->hfs_flags
|= HFS_RESIZE_IN_PROGRESS
;
3968 HFS_MOUNT_UNLOCK(hfsmp
, TRUE
);
3970 /* Start with a clean journal. */
3971 hfs_journal_flush(hfsmp
, TRUE
);
3974 * Enclose changes inside a transaction.
3976 if (hfs_start_transaction(hfsmp
) != 0) {
3980 transaction_begun
= 1;
3983 * Note: we take the attributes lock in case we have an attribute data vnode
3984 * which needs to change size.
3986 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_ATTRIBUTE
| SFL_EXTENTS
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
3987 vp
= vcb
->allocationsRefNum
;
3989 bcopy(&fp
->ff_data
, &forkdata
, sizeof(forkdata
));
3992 * Calculate additional space required (if any) by allocation bitmap.
3994 oldBitmapSize
= fp
->ff_size
;
3995 bitmapblks
= roundup((newblkcnt
+7) / 8, vcb
->vcbVBMIOSize
) / vcb
->blockSize
;
3996 if (bitmapblks
> (daddr_t
)fp
->ff_blocks
)
3997 bitmapblks
-= fp
->ff_blocks
;
4002 * The allocation bitmap can contain unused bits that are beyond end of
4003 * current volume's allocation blocks. Usually they are supposed to be
4004 * zero'ed out but there can be cases where they might be marked as used.
4005 * After extending the file system, those bits can represent valid
4006 * allocation blocks, so we mark all the bits from the end of current
4007 * volume to end of allocation bitmap as "free".
4009 BlockMarkFreeUnused(vcb
, vcb
->totalBlocks
,
4010 (fp
->ff_blocks
* vcb
->blockSize
* 8) - vcb
->totalBlocks
);
4012 if (bitmapblks
> 0) {
4018 * Get the bitmap's current size (in allocation blocks) so we know
4019 * where to start zero filling once the new space is added. We've
4020 * got to do this before the bitmap is grown.
4022 blkno
= (daddr64_t
)fp
->ff_blocks
;
4025 * Try to grow the allocation file in the normal way, using allocation
4026 * blocks already existing in the file system. This way, we might be
4027 * able to grow the bitmap contiguously, or at least in the metadata
4030 error
= ExtendFileC(vcb
, fp
, bitmapblks
* vcb
->blockSize
, 0,
4031 kEFAllMask
| kEFNoClumpMask
| kEFReserveMask
4032 | kEFMetadataMask
| kEFContigMask
, &bytesAdded
);
4035 usedExtendFileC
= true;
4038 * If the above allocation failed, fall back to allocating the new
4039 * extent of the bitmap from the space we're going to add. Since those
4040 * blocks don't yet belong to the file system, we have to update the
4041 * extent list directly, and manually adjust the file size.
4044 error
= AddFileExtent(vcb
, fp
, vcb
->totalBlocks
, bitmapblks
);
4046 printf("hfs_extendfs: error %d adding extents\n", error
);
4049 fp
->ff_blocks
+= bitmapblks
;
4050 VTOC(vp
)->c_blocks
= fp
->ff_blocks
;
4051 VTOC(vp
)->c_flag
|= C_MODIFIED
;
4055 * Update the allocation file's size to include the newly allocated
4056 * blocks. Note that ExtendFileC doesn't do this, which is why this
4057 * statement is outside the above "if" statement.
4059 fp
->ff_size
+= (u_int64_t
)bitmapblks
* (u_int64_t
)vcb
->blockSize
;
4062 * Zero out the new bitmap blocks.
4067 blkcnt
= bitmapblks
;
4068 while (blkcnt
> 0) {
4069 error
= (int)buf_meta_bread(vp
, blkno
, vcb
->blockSize
, NOCRED
, &bp
);
4076 bzero((char *)buf_dataptr(bp
), vcb
->blockSize
);
4078 error
= (int)buf_bwrite(bp
);
4086 printf("hfs_extendfs: error %d clearing blocks\n", error
);
4090 * Mark the new bitmap space as allocated.
4092 * Note that ExtendFileC will have marked any blocks it allocated, so
4093 * this is only needed if we used AddFileExtent. Also note that this
4094 * has to come *after* the zero filling of new blocks in the case where
4095 * we used AddFileExtent (since the part of the bitmap we're touching
4096 * is in those newly allocated blocks).
4098 if (!usedExtendFileC
) {
4099 error
= BlockMarkAllocated(vcb
, vcb
->totalBlocks
, bitmapblks
);
4101 printf("hfs_extendfs: error %d setting bitmap\n", error
);
4104 vcb
->freeBlocks
-= bitmapblks
;
4108 * Mark the new alternate VH as allocated.
4110 if (vcb
->blockSize
== 512)
4111 error
= BlockMarkAllocated(vcb
, vcb
->totalBlocks
+ addblks
- 2, 2);
4113 error
= BlockMarkAllocated(vcb
, vcb
->totalBlocks
+ addblks
- 1, 1);
4115 printf("hfs_extendfs: error %d setting bitmap (VH)\n", error
);
4119 * Mark the old alternate VH as free.
4121 if (vcb
->blockSize
== 512)
4122 (void) BlockMarkFree(vcb
, vcb
->totalBlocks
- 2, 2);
4124 (void) BlockMarkFree(vcb
, vcb
->totalBlocks
- 1, 1);
4126 * Adjust file system variables for new space.
4128 prev_phys_block_count
= hfsmp
->hfs_logical_block_count
;
4129 prev_alt_sector
= hfsmp
->hfs_alt_id_sector
;
4131 vcb
->totalBlocks
+= addblks
;
4132 vcb
->freeBlocks
+= addblks
;
4133 hfsmp
->hfs_logical_block_count
= newsize
/ sectorsize
;
4134 hfsmp
->hfs_alt_id_sector
= (hfsmp
->hfsPlusIOPosOffset
/ sectorsize
) +
4135 HFS_ALT_SECTOR(sectorsize
, hfsmp
->hfs_logical_block_count
);
4137 error
= hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, HFS_ALTFLUSH
);
4139 printf("hfs_extendfs: couldn't flush volume headers (%d)", error
);
4141 * Restore to old state.
4143 if (usedExtendFileC
) {
4144 (void) TruncateFileC(vcb
, fp
, oldBitmapSize
, 0, FORK_IS_RSRC(fp
),
4145 FTOC(fp
)->c_fileid
, false);
4147 fp
->ff_blocks
-= bitmapblks
;
4148 fp
->ff_size
-= (u_int64_t
)bitmapblks
* (u_int64_t
)vcb
->blockSize
;
4150 * No need to mark the excess blocks free since those bitmap blocks
4151 * are no longer part of the bitmap. But we do need to undo the
4152 * effect of the "vcb->freeBlocks -= bitmapblks" above.
4154 vcb
->freeBlocks
+= bitmapblks
;
4156 vcb
->totalBlocks
-= addblks
;
4157 vcb
->freeBlocks
-= addblks
;
4158 hfsmp
->hfs_logical_block_count
= prev_phys_block_count
;
4159 hfsmp
->hfs_alt_id_sector
= prev_alt_sector
;
4161 if (vcb
->blockSize
== 512) {
4162 if (BlockMarkAllocated(vcb
, vcb
->totalBlocks
- 2, 2)) {
4163 hfs_mark_volume_inconsistent(hfsmp
);
4166 if (BlockMarkAllocated(vcb
, vcb
->totalBlocks
- 1, 1)) {
4167 hfs_mark_volume_inconsistent(hfsmp
);
4173 * Invalidate the old alternate volume header.
4176 if (prev_alt_sector
) {
4177 if (buf_meta_bread(hfsmp
->hfs_devvp
,
4178 HFS_PHYSBLK_ROUNDDOWN(prev_alt_sector
, hfsmp
->hfs_log_per_phys
),
4179 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp
) == 0) {
4180 journal_modify_block_start(hfsmp
->jnl
, bp
);
4182 bzero((char *)buf_dataptr(bp
) + HFS_ALT_OFFSET(hfsmp
->hfs_physical_block_size
), kMDBSize
);
4184 journal_modify_block_end(hfsmp
->jnl
, bp
, NULL
, NULL
);
4191 * Update the metadata zone size based on current volume size
4193 hfs_metadatazone_init(hfsmp
, false);
4196 * Adjust the size of hfsmp->hfs_attrdata_vp
4198 if (hfsmp
->hfs_attrdata_vp
) {
4199 struct cnode
*attr_cp
;
4200 struct filefork
*attr_fp
;
4202 if (vnode_get(hfsmp
->hfs_attrdata_vp
) == 0) {
4203 attr_cp
= VTOC(hfsmp
->hfs_attrdata_vp
);
4204 attr_fp
= VTOF(hfsmp
->hfs_attrdata_vp
);
4206 attr_cp
->c_blocks
= newblkcnt
;
4207 attr_fp
->ff_blocks
= newblkcnt
;
4208 attr_fp
->ff_extents
[0].blockCount
= newblkcnt
;
4209 attr_fp
->ff_size
= (off_t
) newblkcnt
* hfsmp
->blockSize
;
4210 ubc_setsize(hfsmp
->hfs_attrdata_vp
, attr_fp
->ff_size
);
4211 vnode_put(hfsmp
->hfs_attrdata_vp
);
4216 * Update the R/B Tree if necessary. Since we don't have to drop the systemfile
4217 * locks in the middle of these operations like we do in the truncate case
4218 * where we have to relocate files, we can only update the red-black tree
4219 * if there were actual changes made to the bitmap. Also, we can't really scan the
4220 * new portion of the bitmap before it has been allocated. The BlockMarkAllocated
4221 * routines are smart enough to avoid the r/b tree if the portion they are manipulating is
4222 * not currently controlled by the tree.
4224 * We only update hfsmp->allocLimit if totalBlocks actually increased.
4228 UpdateAllocLimit(hfsmp
, hfsmp
->totalBlocks
);
4231 /* Log successful extending */
4232 printf("hfs_extendfs: extended \"%s\" to %d blocks (was %d blocks)\n",
4233 hfsmp
->vcbVN
, hfsmp
->totalBlocks
, (u_int32_t
)(oldsize
/hfsmp
->blockSize
));
4237 /* Restore allocation fork. */
4238 bcopy(&forkdata
, &fp
->ff_data
, sizeof(forkdata
));
4239 VTOC(vp
)->c_blocks
= fp
->ff_blocks
;
4243 HFS_MOUNT_LOCK(hfsmp
, TRUE
);
4244 hfsmp
->hfs_flags
&= ~HFS_RESIZE_IN_PROGRESS
;
4245 HFS_MOUNT_UNLOCK(hfsmp
, TRUE
);
4247 hfs_systemfile_unlock(hfsmp
, lockflags
);
4249 if (transaction_begun
) {
4250 hfs_end_transaction(hfsmp
);
4251 hfs_journal_flush(hfsmp
, FALSE
);
4252 /* Just to be sure, sync all data to the disk */
4253 (void) VNOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, context
);
4256 return MacToVFSError(error
);
4259 #define HFS_MIN_SIZE (32LL * 1024LL * 1024LL)
4262 * Truncate a file system (while still mounted).
4265 hfs_truncatefs(struct hfsmount
*hfsmp
, u_int64_t newsize
, vfs_context_t context
)
4267 struct buf
*bp
= NULL
;
4269 u_int32_t newblkcnt
;
4270 u_int32_t reclaimblks
= 0;
4272 int transaction_begun
= 0;
4273 Boolean updateFreeBlocks
= false;
4274 Boolean disable_sparse
= false;
4277 lck_mtx_lock(&hfsmp
->hfs_mutex
);
4278 if (hfsmp
->hfs_flags
& HFS_RESIZE_IN_PROGRESS
) {
4279 lck_mtx_unlock(&hfsmp
->hfs_mutex
);
4282 hfsmp
->hfs_flags
|= HFS_RESIZE_IN_PROGRESS
;
4283 hfsmp
->hfs_resize_blocksmoved
= 0;
4284 hfsmp
->hfs_resize_totalblocks
= 0;
4285 hfsmp
->hfs_resize_progress
= 0;
4286 lck_mtx_unlock(&hfsmp
->hfs_mutex
);
4289 * - Journaled HFS Plus volumes only.
4290 * - No embedded volumes.
4292 if ((hfsmp
->jnl
== NULL
) ||
4293 (hfsmp
->hfsPlusIOPosOffset
!= 0)) {
4297 oldsize
= (u_int64_t
)hfsmp
->totalBlocks
* (u_int64_t
)hfsmp
->blockSize
;
4298 newblkcnt
= newsize
/ hfsmp
->blockSize
;
4299 reclaimblks
= hfsmp
->totalBlocks
- newblkcnt
;
4301 if (hfs_resize_debug
) {
4302 printf ("hfs_truncatefs: old: size=%qu, blkcnt=%u, freeblks=%u\n", oldsize
, hfsmp
->totalBlocks
, hfs_freeblks(hfsmp
, 1));
4303 printf ("hfs_truncatefs: new: size=%qu, blkcnt=%u, reclaimblks=%u\n", newsize
, newblkcnt
, reclaimblks
);
4306 /* Make sure new size is valid. */
4307 if ((newsize
< HFS_MIN_SIZE
) ||
4308 (newsize
>= oldsize
) ||
4309 (newsize
% hfsmp
->hfs_logical_block_size
) ||
4310 (newsize
% hfsmp
->hfs_physical_block_size
)) {
4311 printf ("hfs_truncatefs: invalid size (newsize=%qu, oldsize=%qu)\n", newsize
, oldsize
);
4317 * Make sure that the file system has enough free blocks reclaim.
4319 * Before resize, the disk is divided into four zones -
4320 * A. Allocated_Stationary - These are allocated blocks that exist
4321 * before the new end of disk. These blocks will not be
4322 * relocated or modified during resize.
4323 * B. Free_Stationary - These are free blocks that exist before the
4324 * new end of disk. These blocks can be used for any new
4325 * allocations during resize, including allocation for relocating
4326 * data from the area of disk being reclaimed.
4327 * C. Allocated_To-Reclaim - These are allocated blocks that exist
4328 * beyond the new end of disk. These blocks need to be reclaimed
4329 * during resize by allocating equal number of blocks in Free
4330 * Stationary zone and copying the data.
4331 * D. Free_To-Reclaim - These are free blocks that exist beyond the
4332 * new end of disk. Nothing special needs to be done to reclaim
4335 * Total number of blocks on the disk before resize:
4336 * ------------------------------------------------
4337 * Total Blocks = Allocated_Stationary + Free_Stationary +
4338 * Allocated_To-Reclaim + Free_To-Reclaim
4340 * Total number of blocks that need to be reclaimed:
4341 * ------------------------------------------------
4342 * Blocks to Reclaim = Allocated_To-Reclaim + Free_To-Reclaim
4344 * Note that the check below also makes sure that we have enough space
4345 * to relocate data from Allocated_To-Reclaim to Free_Stationary.
4346 * Therefore we do not need to check total number of blocks to relocate
4347 * later in the code.
4349 * The condition below gets converted to:
4351 * Allocated To-Reclaim + Free To-Reclaim >= Free Stationary + Free To-Reclaim
4353 * which is equivalent to:
4355 * Allocated To-Reclaim >= Free Stationary
4357 if (reclaimblks
>= hfs_freeblks(hfsmp
, 1)) {
4358 printf("hfs_truncatefs: insufficient space (need %u blocks; have %u free blocks)\n", reclaimblks
, hfs_freeblks(hfsmp
, 1));
4363 /* Start with a clean journal. */
4364 hfs_journal_flush(hfsmp
, TRUE
);
4366 if (hfs_start_transaction(hfsmp
) != 0) {
4370 transaction_begun
= 1;
4372 /* Take the bitmap lock to update the alloc limit field */
4373 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
4376 * Prevent new allocations from using the part we're trying to truncate.
4378 * NOTE: allocLimit is set to the allocation block number where the new
4379 * alternate volume header will be. That way there will be no files to
4380 * interfere with allocating the new alternate volume header, and no files
4381 * in the allocation blocks beyond (i.e. the blocks we're trying to
4384 * Also shrink the red-black tree if needed.
4386 if (hfsmp
->blockSize
== 512) {
4387 error
= UpdateAllocLimit (hfsmp
, newblkcnt
- 2);
4390 error
= UpdateAllocLimit (hfsmp
, newblkcnt
- 1);
4393 /* Sparse devices use first fit allocation which is not ideal
4394 * for volume resize which requires best fit allocation. If a
4395 * sparse device is being truncated, disable the sparse device
4396 * property temporarily for the duration of resize. Also reset
4397 * the free extent cache so that it is rebuilt as sorted by
4398 * totalBlocks instead of startBlock.
4400 * Note that this will affect all allocations on the volume and
4401 * ideal fix would be just to modify resize-related allocations,
4402 * but it will result in complexity like handling of two free
4403 * extent caches sorted differently, etc. So we stick to this
4406 HFS_MOUNT_LOCK(hfsmp
, TRUE
);
4407 if (hfsmp
->hfs_flags
& HFS_HAS_SPARSE_DEVICE
) {
4408 hfsmp
->hfs_flags
&= ~HFS_HAS_SPARSE_DEVICE
;
4409 ResetVCBFreeExtCache(hfsmp
);
4410 disable_sparse
= true;
4414 * Update the volume free block count to reflect the total number
4415 * of free blocks that will exist after a successful resize.
4416 * Relocation of extents will result in no net change in the total
4417 * free space on the disk. Therefore the code that allocates
4418 * space for new extent and deallocates the old extent explicitly
4419 * prevents updating the volume free block count. It will also
4420 * prevent false disk full error when the number of blocks in
4421 * an extent being relocated is more than the free blocks that
4422 * will exist after the volume is resized.
4424 hfsmp
->freeBlocks
-= reclaimblks
;
4425 updateFreeBlocks
= true;
4426 HFS_MOUNT_UNLOCK(hfsmp
, TRUE
);
4429 hfs_systemfile_unlock(hfsmp
, lockflags
);
4434 * Update the metadata zone size to match the new volume size,
4435 * and if it too less, metadata zone might be disabled.
4437 hfs_metadatazone_init(hfsmp
, false);
4440 * If some files have blocks at or beyond the location of the
4441 * new alternate volume header, recalculate free blocks and
4442 * reclaim blocks. Otherwise just update free blocks count.
4444 * The current allocLimit is set to the location of new alternate
4445 * volume header, and reclaimblks are the total number of blocks
4446 * that need to be reclaimed. So the check below is really
4447 * ignoring the blocks allocated for old alternate volume header.
4449 if (hfs_isallocated(hfsmp
, hfsmp
->allocLimit
, reclaimblks
)) {
4451 * hfs_reclaimspace will use separate transactions when
4452 * relocating files (so we don't overwhelm the journal).
4454 hfs_end_transaction(hfsmp
);
4455 transaction_begun
= 0;
4457 /* Attempt to reclaim some space. */
4458 error
= hfs_reclaimspace(hfsmp
, hfsmp
->allocLimit
, reclaimblks
, context
);
4460 printf("hfs_truncatefs: couldn't reclaim space on %s (error=%d)\n", hfsmp
->vcbVN
, error
);
4464 if (hfs_start_transaction(hfsmp
) != 0) {
4468 transaction_begun
= 1;
4470 /* Check if we're clear now. */
4471 error
= hfs_isallocated(hfsmp
, hfsmp
->allocLimit
, reclaimblks
);
4473 printf("hfs_truncatefs: didn't reclaim enough space on %s (error=%d)\n", hfsmp
->vcbVN
, error
);
4474 error
= EAGAIN
; /* tell client to try again */
4480 * Note: we take the attributes lock in case we have an attribute data vnode
4481 * which needs to change size.
4483 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_ATTRIBUTE
| SFL_EXTENTS
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
4486 * Allocate last 1KB for alternate volume header.
4488 error
= BlockMarkAllocated(hfsmp
, hfsmp
->allocLimit
, (hfsmp
->blockSize
== 512) ? 2 : 1);
4490 printf("hfs_truncatefs: Error %d allocating new alternate volume header\n", error
);
4495 * Mark the old alternate volume header as free.
4496 * We don't bother shrinking allocation bitmap file.
4498 if (hfsmp
->blockSize
== 512)
4499 (void) BlockMarkFree(hfsmp
, hfsmp
->totalBlocks
- 2, 2);
4501 (void) BlockMarkFree(hfsmp
, hfsmp
->totalBlocks
- 1, 1);
4504 * Invalidate the existing alternate volume header.
4506 * Don't include this in a transaction (don't call journal_modify_block)
4507 * since this block will be outside of the truncated file system!
4509 if (hfsmp
->hfs_alt_id_sector
) {
4510 error
= buf_meta_bread(hfsmp
->hfs_devvp
,
4511 HFS_PHYSBLK_ROUNDDOWN(hfsmp
->hfs_alt_id_sector
, hfsmp
->hfs_log_per_phys
),
4512 hfsmp
->hfs_physical_block_size
, NOCRED
, &bp
);
4514 bzero((void*)((char *)buf_dataptr(bp
) + HFS_ALT_OFFSET(hfsmp
->hfs_physical_block_size
)), kMDBSize
);
4515 (void) VNOP_BWRITE(bp
);
4524 /* Log successful shrinking. */
4525 printf("hfs_truncatefs: shrank \"%s\" to %d blocks (was %d blocks)\n",
4526 hfsmp
->vcbVN
, newblkcnt
, hfsmp
->totalBlocks
);
4529 * Adjust file system variables and flush them to disk.
4531 hfsmp
->totalBlocks
= newblkcnt
;
4532 hfsmp
->hfs_logical_block_count
= newsize
/ hfsmp
->hfs_logical_block_size
;
4533 hfsmp
->hfs_alt_id_sector
= HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
, hfsmp
->hfs_logical_block_count
);
4534 MarkVCBDirty(hfsmp
);
4535 error
= hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, HFS_ALTFLUSH
);
4537 panic("hfs_truncatefs: unexpected error flushing volume header (%d)\n", error
);
4540 * Adjust the size of hfsmp->hfs_attrdata_vp
4542 if (hfsmp
->hfs_attrdata_vp
) {
4544 struct filefork
*fp
;
4546 if (vnode_get(hfsmp
->hfs_attrdata_vp
) == 0) {
4547 cp
= VTOC(hfsmp
->hfs_attrdata_vp
);
4548 fp
= VTOF(hfsmp
->hfs_attrdata_vp
);
4550 cp
->c_blocks
= newblkcnt
;
4551 fp
->ff_blocks
= newblkcnt
;
4552 fp
->ff_extents
[0].blockCount
= newblkcnt
;
4553 fp
->ff_size
= (off_t
) newblkcnt
* hfsmp
->blockSize
;
4554 ubc_setsize(hfsmp
->hfs_attrdata_vp
, fp
->ff_size
);
4555 vnode_put(hfsmp
->hfs_attrdata_vp
);
4561 * Update the allocLimit to acknowledge the last one or two blocks now.
4562 * Add it to the tree as well if necessary.
4564 UpdateAllocLimit (hfsmp
, hfsmp
->totalBlocks
);
4566 HFS_MOUNT_LOCK(hfsmp
, TRUE
);
4567 if (disable_sparse
== true) {
4568 /* Now that resize is completed, set the volume to be sparse
4569 * device again so that all further allocations will be first
4570 * fit instead of best fit. Reset free extent cache so that
4573 hfsmp
->hfs_flags
|= HFS_HAS_SPARSE_DEVICE
;
4574 ResetVCBFreeExtCache(hfsmp
);
4577 if (error
&& (updateFreeBlocks
== true)) {
4578 hfsmp
->freeBlocks
+= reclaimblks
;
4581 if (hfsmp
->nextAllocation
>= hfsmp
->allocLimit
) {
4582 hfsmp
->nextAllocation
= hfsmp
->hfs_metazone_end
+ 1;
4584 hfsmp
->hfs_flags
&= ~HFS_RESIZE_IN_PROGRESS
;
4585 HFS_MOUNT_UNLOCK(hfsmp
, TRUE
);
4587 /* On error, reset the metadata zone for original volume size */
4588 if (error
&& (updateFreeBlocks
== true)) {
4589 hfs_metadatazone_init(hfsmp
, false);
4593 hfs_systemfile_unlock(hfsmp
, lockflags
);
4595 if (transaction_begun
) {
4596 hfs_end_transaction(hfsmp
);
4597 hfs_journal_flush(hfsmp
, FALSE
);
4598 /* Just to be sure, sync all data to the disk */
4599 (void) VNOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, context
);
4602 return MacToVFSError(error
);
4607 * Invalidate the physical block numbers associated with buffer cache blocks
4608 * in the given extent of the given vnode.
4610 struct hfs_inval_blk_no
{
4611 daddr64_t sectorStart
;
4612 daddr64_t sectorCount
;
4615 hfs_invalidate_block_numbers_callback(buf_t bp
, void *args_in
)
4618 struct hfs_inval_blk_no
*args
;
4620 blkno
= buf_blkno(bp
);
4623 if (blkno
>= args
->sectorStart
&& blkno
< args
->sectorStart
+args
->sectorCount
)
4624 buf_setblkno(bp
, buf_lblkno(bp
));
4626 return BUF_RETURNED
;
4629 hfs_invalidate_sectors(struct vnode
*vp
, daddr64_t sectorStart
, daddr64_t sectorCount
)
4631 struct hfs_inval_blk_no args
;
4632 args
.sectorStart
= sectorStart
;
4633 args
.sectorCount
= sectorCount
;
4635 buf_iterate(vp
, hfs_invalidate_block_numbers_callback
, BUF_SCAN_DIRTY
|BUF_SCAN_CLEAN
, &args
);
4640 * Copy the contents of an extent to a new location. Also invalidates the
4641 * physical block number of any buffer cache block in the copied extent
4642 * (so that if the block is written, it will go through VNOP_BLOCKMAP to
4643 * determine the new physical block number).
4647 struct hfsmount
*hfsmp
,
4648 struct vnode
*vp
, /* The file whose extent is being copied. */
4649 u_int32_t oldStart
, /* The start of the source extent. */
4650 u_int32_t newStart
, /* The start of the destination extent. */
4651 u_int32_t blockCount
, /* The number of allocation blocks to copy. */
4652 vfs_context_t context
)
4656 void *buffer
= NULL
;
4657 struct vfsioattr ioattr
;
4661 u_int32_t ioSizeSectors
; /* Device sectors in this I/O */
4662 daddr64_t srcSector
, destSector
;
4663 u_int32_t sectorsPerBlock
= hfsmp
->blockSize
/ hfsmp
->hfs_logical_block_size
;
4669 * Sanity check that we have locked the vnode of the file we're copying.
4671 * But since hfs_systemfile_lock() doesn't actually take the lock on
4672 * the allocation file if a journal is active, ignore the check if the
4673 * file being copied is the allocation file.
4675 struct cnode
*cp
= VTOC(vp
);
4676 if (cp
!= hfsmp
->hfs_allocation_cp
&& cp
->c_lockowner
!= current_thread())
4677 panic("hfs_copy_extent: vp=%p (cp=%p) not owned?\n", vp
, cp
);
4680 /* Prepare the CP blob and get it ready for use */
4681 if (!vnode_issystem (vp
) && vnode_isreg(vp
) &&
4682 cp_fs_protected (hfsmp
->hfs_mp
)) {
4684 cp_err
= cp_handle_relocate (cp
);
4687 * can't copy the file because we couldn't set up keys.
4699 * Determine the I/O size to use
4701 * NOTE: Many external drives will result in an ioSize of 128KB.
4702 * TODO: Should we use a larger buffer, doing several consecutive
4703 * reads, then several consecutive writes?
4705 vfs_ioattr(hfsmp
->hfs_mp
, &ioattr
);
4706 bufferSize
= MIN(ioattr
.io_maxreadcnt
, ioattr
.io_maxwritecnt
);
4707 if (kmem_alloc(kernel_map
, (vm_offset_t
*) &buffer
, bufferSize
))
4710 /* Get a buffer for doing the I/O */
4711 bp
= buf_alloc(hfsmp
->hfs_devvp
);
4712 buf_setdataptr(bp
, (uintptr_t)buffer
);
4714 resid
= (off_t
) blockCount
* (off_t
) hfsmp
->blockSize
;
4715 srcSector
= (daddr64_t
) oldStart
* hfsmp
->blockSize
/ hfsmp
->hfs_logical_block_size
;
4716 destSector
= (daddr64_t
) newStart
* hfsmp
->blockSize
/ hfsmp
->hfs_logical_block_size
;
4718 ioSize
= MIN(bufferSize
, (size_t) resid
);
4719 ioSizeSectors
= ioSize
/ hfsmp
->hfs_logical_block_size
;
4721 /* Prepare the buffer for reading */
4722 buf_reset(bp
, B_READ
);
4723 buf_setsize(bp
, ioSize
);
4724 buf_setcount(bp
, ioSize
);
4725 buf_setblkno(bp
, srcSector
);
4726 buf_setlblkno(bp
, srcSector
);
4728 /* Attach the CP to the buffer */
4731 buf_setcpaddr (bp
, cp
->c_cpentry
);
4736 err
= VNOP_STRATEGY(bp
);
4738 err
= buf_biowait(bp
);
4740 printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (read)\n", err
);
4744 /* Prepare the buffer for writing */
4745 buf_reset(bp
, B_WRITE
);
4746 buf_setsize(bp
, ioSize
);
4747 buf_setcount(bp
, ioSize
);
4748 buf_setblkno(bp
, destSector
);
4749 buf_setlblkno(bp
, destSector
);
4750 if (vnode_issystem(vp
) && journal_uses_fua(hfsmp
->jnl
))
4754 /* Attach the CP to the buffer */
4756 buf_setcpaddr (bp
, cp
->c_cpentry
);
4761 vnode_startwrite(hfsmp
->hfs_devvp
);
4762 err
= VNOP_STRATEGY(bp
);
4764 err
= buf_biowait(bp
);
4766 printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (write)\n", err
);
4771 srcSector
+= ioSizeSectors
;
4772 destSector
+= ioSizeSectors
;
4777 kmem_free(kernel_map
, (vm_offset_t
)buffer
, bufferSize
);
4779 /* Make sure all writes have been flushed to disk. */
4780 if (vnode_issystem(vp
) && !journal_uses_fua(hfsmp
->jnl
)) {
4781 err
= VNOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, context
);
4783 printf("hfs_copy_extent: DKIOCSYNCHRONIZECACHE failed (%d)\n", err
);
4784 err
= 0; /* Don't fail the copy. */
4789 hfs_invalidate_sectors(vp
, (daddr64_t
)oldStart
*sectorsPerBlock
, (daddr64_t
)blockCount
*sectorsPerBlock
);
4795 /* Structure to store state of reclaiming extents from a
4796 * given file. hfs_reclaim_file()/hfs_reclaim_xattr()
4797 * initializes the values in this structure which are then
4798 * used by code that reclaims and splits the extents.
4800 struct hfs_reclaim_extent_info
{
4804 u_int8_t is_dirlink
; /* Extent belongs to directory hard link */
4805 u_int8_t is_sysfile
; /* Extent belongs to system file */
4806 u_int8_t is_xattr
; /* Extent belongs to extent-based xattr */
4807 u_int8_t extent_index
;
4808 int lockflags
; /* Locks that reclaim and split code should grab before modifying the extent record */
4809 u_int32_t blocks_relocated
; /* Total blocks relocated for this file till now */
4810 u_int32_t recStartBlock
; /* File allocation block number (FABN) for current extent record */
4811 u_int32_t cur_blockCount
; /* Number of allocation blocks that have been checked for reclaim */
4812 struct filefork
*catalog_fp
; /* If non-NULL, extent is from catalog record */
4814 HFSPlusExtentRecord overflow
;/* Extent record from overflow extents btree */
4815 HFSPlusAttrRecord xattr
; /* Attribute record for large EAs */
4817 HFSPlusExtentDescriptor
*extents
; /* Pointer to current extent record being processed.
4818 * For catalog extent record, points to the correct
4819 * extent information in filefork. For overflow extent
4820 * record, or xattr record, points to extent record
4821 * in the structure above
4823 struct cat_desc
*dirlink_desc
;
4824 struct cat_attr
*dirlink_attr
;
4825 struct filefork
*dirlink_fork
; /* For directory hard links, fp points actually to this */
4826 struct BTreeIterator
*iterator
; /* Shared read/write iterator, hfs_reclaim_file/xattr()
4827 * use it for reading and hfs_reclaim_extent()/hfs_split_extent()
4828 * use it for writing updated extent record
4830 struct FSBufferDescriptor btdata
; /* Shared btdata for reading/writing extent record, same as iterator above */
4831 u_int16_t recordlen
;
4832 int overflow_count
; /* For debugging, counter for overflow extent record */
4833 FCB
*fcb
; /* Pointer to the current btree being traversed */
4837 * Split the current extent into two extents, with first extent
4838 * to contain given number of allocation blocks. Splitting of
4839 * extent creates one new extent entry which can result in
4840 * shifting of many entries through all the extent records of a
4841 * file, and/or creating a new extent record in the overflow
4845 * The diagram below represents two consecutive extent records,
4846 * for simplicity, lets call them record X and X+1 respectively.
4847 * Interesting extent entries have been denoted by letters.
4848 * If the letter is unchanged before and after split, it means
4849 * that the extent entry was not modified during the split.
4850 * A '.' means that the entry remains unchanged after the split
4851 * and is not relevant for our example. A '0' means that the
4852 * extent entry is empty.
4854 * If there isn't sufficient contiguous free space to relocate
4855 * an extent (extent "C" below), we will have to break the one
4856 * extent into multiple smaller extents, and relocate each of
4857 * the smaller extents individually. The way we do this is by
4858 * finding the largest contiguous free space that is currently
4859 * available (N allocation blocks), and then convert extent "C"
4860 * into two extents, C1 and C2, that occupy exactly the same
4861 * allocation blocks as extent C. Extent C1 is the first
4862 * N allocation blocks of extent C, and extent C2 is the remainder
4863 * of extent C. Then we can relocate extent C1 since we know
4864 * we have enough contiguous free space to relocate it in its
4865 * entirety. We then repeat the process starting with extent C2.
4867 * In record X, only the entries following entry C are shifted, and
4868 * the original entry C is replaced with two entries C1 and C2 which
4869 * are actually two extent entries for contiguous allocation blocks.
4871 * Note that the entry E from record X is shifted into record X+1 as
4872 * the new first entry. Since the first entry of record X+1 is updated,
4873 * the FABN will also get updated with the blockCount of entry E.
4874 * This also results in shifting of all extent entries in record X+1.
4875 * Note that the number of empty entries after the split has been
4876 * changed from 3 to 2.
4879 * record X record X+1
4880 * ---------------------===--------- ---------------------------------
4881 * | A | . | . | . | B | C | D | E | | F | . | . | . | G | 0 | 0 | 0 |
4882 * ---------------------===--------- ---------------------------------
4885 * ---------------------=======----- ---------------------------------
4886 * | A | . | . | . | B | C1| C2| D | | E | F | . | . | . | G | 0 | 0 |
4887 * ---------------------=======----- ---------------------------------
4889 * C1.startBlock = C.startBlock
4892 * C2.startBlock = C.startBlock + N
4893 * C2.blockCount = C.blockCount - N
4895 * FABN = old FABN - E.blockCount
4898 * extent_info - This is the structure that contains state about
4899 * the current file, extent, and extent record that
4900 * is being relocated. This structure is shared
4901 * among code that traverses through all the extents
4902 * of the file, code that relocates extents, and
4903 * code that splits the extent.
4905 * Zero on success, non-zero on failure.
4908 hfs_split_extent(struct hfs_reclaim_extent_info
*extent_info
, uint32_t newBlockCount
)
4911 int index
= extent_info
->extent_index
;
4913 HFSPlusExtentDescriptor shift_extent
; /* Extent entry that should be shifted into next extent record */
4914 HFSPlusExtentDescriptor last_extent
;
4915 HFSPlusExtentDescriptor
*extents
; /* Pointer to current extent record being manipulated */
4916 HFSPlusExtentRecord
*extents_rec
= NULL
;
4917 HFSPlusExtentKey
*extents_key
= NULL
;
4918 HFSPlusAttrRecord
*xattr_rec
= NULL
;
4919 HFSPlusAttrKey
*xattr_key
= NULL
;
4920 struct BTreeIterator iterator
;
4921 struct FSBufferDescriptor btdata
;
4923 uint32_t read_recStartBlock
; /* Starting allocation block number to read old extent record */
4924 uint32_t write_recStartBlock
; /* Starting allocation block number to insert newly updated extent record */
4925 Boolean create_record
= false;
4929 is_xattr
= extent_info
->is_xattr
;
4930 extents
= extent_info
->extents
;
4931 cp
= VTOC(extent_info
->vp
);
4933 if (hfs_resize_debug
) {
4934 printf ("hfs_split_extent: Split record:%u recStartBlock=%u %u:(%u,%u) for %u blocks\n", extent_info
->overflow_count
, extent_info
->recStartBlock
, index
, extents
[index
].startBlock
, extents
[index
].blockCount
, newBlockCount
);
4937 /* Extents overflow btree can not have more than 8 extents.
4938 * No split allowed if the 8th extent is already used.
4940 if ((extent_info
->fileID
== kHFSExtentsFileID
) && (extents
[kHFSPlusExtentDensity
- 1].blockCount
!= 0)) {
4941 printf ("hfs_split_extent: Maximum 8 extents allowed for extents overflow btree, cannot split further.\n");
4946 /* Determine the starting allocation block number for the following
4947 * overflow extent record, if any, before the current record
4950 read_recStartBlock
= extent_info
->recStartBlock
;
4951 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
4952 if (extents
[i
].blockCount
== 0) {
4955 read_recStartBlock
+= extents
[i
].blockCount
;
4958 /* Shift and split */
4959 if (index
== kHFSPlusExtentDensity
-1) {
4960 /* The new extent created after split will go into following overflow extent record */
4961 shift_extent
.startBlock
= extents
[index
].startBlock
+ newBlockCount
;
4962 shift_extent
.blockCount
= extents
[index
].blockCount
- newBlockCount
;
4964 /* Last extent in the record will be split, so nothing to shift */
4966 /* Splitting of extents can result in at most of one
4967 * extent entry to be shifted into following overflow extent
4968 * record. So, store the last extent entry for later.
4970 shift_extent
= extents
[kHFSPlusExtentDensity
-1];
4971 if ((hfs_resize_debug
) && (shift_extent
.blockCount
!= 0)) {
4972 printf ("hfs_split_extent: Save 7:(%u,%u) to shift into overflow record\n", shift_extent
.startBlock
, shift_extent
.blockCount
);
4975 /* Start shifting extent information from the end of the extent
4976 * record to the index where we want to insert the new extent.
4977 * Note that kHFSPlusExtentDensity-1 is already saved above, and
4978 * does not need to be shifted. The extent entry that is being
4979 * split does not get shifted.
4981 for (i
= kHFSPlusExtentDensity
-2; i
> index
; i
--) {
4982 if (hfs_resize_debug
) {
4983 if (extents
[i
].blockCount
) {
4984 printf ("hfs_split_extent: Shift %u:(%u,%u) to %u:(%u,%u)\n", i
, extents
[i
].startBlock
, extents
[i
].blockCount
, i
+1, extents
[i
].startBlock
, extents
[i
].blockCount
);
4987 extents
[i
+1] = extents
[i
];
4991 if (index
== kHFSPlusExtentDensity
-1) {
4992 /* The second half of the extent being split will be the overflow
4993 * entry that will go into following overflow extent record. The
4994 * value has been stored in 'shift_extent' above, so there is
4995 * nothing to be done here.
4998 /* Update the values in the second half of the extent being split
4999 * before updating the first half of the split. Note that the
5000 * extent to split or first half of the split is at index 'index'
5001 * and a new extent or second half of the split will be inserted at
5002 * 'index+1' or into following overflow extent record.
5004 extents
[index
+1].startBlock
= extents
[index
].startBlock
+ newBlockCount
;
5005 extents
[index
+1].blockCount
= extents
[index
].blockCount
- newBlockCount
;
5007 /* Update the extent being split, only the block count will change */
5008 extents
[index
].blockCount
= newBlockCount
;
5010 if (hfs_resize_debug
) {
5011 printf ("hfs_split_extent: Split %u:(%u,%u) and ", index
, extents
[index
].startBlock
, extents
[index
].blockCount
);
5012 if (index
!= kHFSPlusExtentDensity
-1) {
5013 printf ("%u:(%u,%u)\n", index
+1, extents
[index
+1].startBlock
, extents
[index
+1].blockCount
);
5015 printf ("overflow:(%u,%u)\n", shift_extent
.startBlock
, shift_extent
.blockCount
);
5019 /* Write out information about the newly split extent to the disk */
5020 if (extent_info
->catalog_fp
) {
5021 /* (extent_info->catalog_fp != NULL) means the newly split
5022 * extent exists in the catalog record. This means that
5023 * the cnode was updated. Therefore, to write out the changes,
5024 * mark the cnode as modified. We cannot call hfs_update()
5025 * in this function because the caller hfs_reclaim_extent()
5026 * is holding the catalog lock currently.
5028 cp
->c_flag
|= C_MODIFIED
;
5030 /* The newly split extent is for large EAs or is in overflow
5031 * extent record, so update it directly in the btree using the
5032 * iterator information from the shared extent_info structure
5034 error
= BTReplaceRecord(extent_info
->fcb
, extent_info
->iterator
,
5035 &(extent_info
->btdata
), extent_info
->recordlen
);
5037 printf ("hfs_split_extent: fileID=%u BTReplaceRecord returned error=%d\n", extent_info
->fileID
, error
);
5042 /* No extent entry to be shifted into another extent overflow record */
5043 if (shift_extent
.blockCount
== 0) {
5044 if (hfs_resize_debug
) {
5045 printf ("hfs_split_extent: No extent entry to be shifted into overflow records\n");
5051 /* The overflow extent entry has to be shifted into an extent
5052 * overflow record. This means that we might have to shift
5053 * extent entries from all subsequent overflow records by one.
5054 * We start iteration from the first record to the last record,
5055 * and shift the extent entry from one record to another.
5056 * We might have to create a new extent record for the last
5057 * extent entry for the file.
5060 /* Initialize iterator to search the next record */
5061 bzero(&iterator
, sizeof(iterator
));
5063 /* Copy the key from the iterator that was used to update the modified attribute record. */
5064 xattr_key
= (HFSPlusAttrKey
*)&(iterator
.key
);
5065 bcopy((HFSPlusAttrKey
*)&(extent_info
->iterator
->key
), xattr_key
, sizeof(HFSPlusAttrKey
));
5066 /* Note: xattr_key->startBlock will be initialized later in the iteration loop */
5068 MALLOC(xattr_rec
, HFSPlusAttrRecord
*,
5069 sizeof(HFSPlusAttrRecord
), M_TEMP
, M_WAITOK
);
5070 if (xattr_rec
== NULL
) {
5074 btdata
.bufferAddress
= xattr_rec
;
5075 btdata
.itemSize
= sizeof(HFSPlusAttrRecord
);
5076 btdata
.itemCount
= 1;
5077 extents
= xattr_rec
->overflowExtents
.extents
;
5079 /* Initialize the extent key for the current file */
5080 extents_key
= (HFSPlusExtentKey
*) &(iterator
.key
);
5081 extents_key
->keyLength
= kHFSPlusExtentKeyMaximumLength
;
5082 extents_key
->forkType
= extent_info
->forkType
;
5083 extents_key
->fileID
= extent_info
->fileID
;
5084 /* Note: extents_key->startBlock will be initialized later in the iteration loop */
5086 MALLOC(extents_rec
, HFSPlusExtentRecord
*,
5087 sizeof(HFSPlusExtentRecord
), M_TEMP
, M_WAITOK
);
5088 if (extents_rec
== NULL
) {
5092 btdata
.bufferAddress
= extents_rec
;
5093 btdata
.itemSize
= sizeof(HFSPlusExtentRecord
);
5094 btdata
.itemCount
= 1;
5095 extents
= extents_rec
[0];
5098 /* The overflow extent entry has to be shifted into an extent
5099 * overflow record. This means that we might have to shift
5100 * extent entries from all subsequent overflow records by one.
5101 * We start iteration from the first record to the last record,
5102 * examine one extent record in each iteration and shift one
5103 * extent entry from one record to another. We might have to
5104 * create a new extent record for the last extent entry for the
5107 * If shift_extent.blockCount is non-zero, it means that there is
5108 * an extent entry that needs to be shifted into the next
5109 * overflow extent record. We keep on going till there are no such
5110 * entries left to be shifted. This will also change the starting
5111 * allocation block number of the extent record which is part of
5112 * the key for the extent record in each iteration. Note that
5113 * because the extent record key is changing while we are searching,
5114 * the record can not be updated directly, instead it has to be
5115 * deleted and inserted again.
5117 while (shift_extent
.blockCount
) {
5118 if (hfs_resize_debug
) {
5119 printf ("hfs_split_extent: Will shift (%u,%u) into overflow record with startBlock=%u\n", shift_extent
.startBlock
, shift_extent
.blockCount
, read_recStartBlock
);
5122 /* Search if there is any existing overflow extent record
5123 * that matches the current file and the logical start block
5126 * For this, the logical start block number in the key is
5127 * the value calculated based on the logical start block
5128 * number of the current extent record and the total number
5129 * of blocks existing in the current extent record.
5132 xattr_key
->startBlock
= read_recStartBlock
;
5134 extents_key
->startBlock
= read_recStartBlock
;
5136 error
= BTSearchRecord(extent_info
->fcb
, &iterator
, &btdata
, &reclen
, &iterator
);
5138 if (error
!= btNotFound
) {
5139 printf ("hfs_split_extent: fileID=%u startBlock=%u BTSearchRecord error=%d\n", extent_info
->fileID
, read_recStartBlock
, error
);
5142 /* No matching record was found, so create a new extent record.
5143 * Note: Since no record was found, we can't rely on the
5144 * btree key in the iterator any longer. This will be initialized
5145 * later before we insert the record.
5147 create_record
= true;
5150 /* The extra extent entry from the previous record is being inserted
5151 * as the first entry in the current extent record. This will change
5152 * the file allocation block number (FABN) of the current extent
5153 * record, which is the startBlock value from the extent record key.
5154 * Since one extra entry is being inserted in the record, the new
5155 * FABN for the record will less than old FABN by the number of blocks
5156 * in the new extent entry being inserted at the start. We have to
5157 * do this before we update read_recStartBlock to point at the
5158 * startBlock of the following record.
5160 write_recStartBlock
= read_recStartBlock
- shift_extent
.blockCount
;
5161 if (hfs_resize_debug
) {
5162 if (create_record
) {
5163 printf ("hfs_split_extent: No records found for startBlock=%u, will create new with startBlock=%u\n", read_recStartBlock
, write_recStartBlock
);
5167 /* Now update the read_recStartBlock to account for total number
5168 * of blocks in this extent record. It will now point to the
5169 * starting allocation block number for the next extent record.
5171 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
5172 if (extents
[i
].blockCount
== 0) {
5175 read_recStartBlock
+= extents
[i
].blockCount
;
5178 if (create_record
== true) {
5179 /* Initialize new record content with only one extent entry */
5180 bzero(extents
, sizeof(HFSPlusExtentRecord
));
5181 /* The new record will contain only one extent entry */
5182 extents
[0] = shift_extent
;
5183 /* There are no more overflow extents to be shifted */
5184 shift_extent
.startBlock
= shift_extent
.blockCount
= 0;
5187 /* BTSearchRecord above returned btNotFound,
5188 * but since the attribute btree is never empty
5189 * if we are trying to insert new overflow
5190 * record for the xattrs, the extents_key will
5191 * contain correct data. So we don't need to
5192 * re-initialize it again like below.
5195 /* Initialize the new xattr record */
5196 xattr_rec
->recordType
= kHFSPlusAttrExtents
;
5197 xattr_rec
->overflowExtents
.reserved
= 0;
5198 reclen
= sizeof(HFSPlusAttrExtents
);
5200 /* BTSearchRecord above returned btNotFound,
5201 * which means that extents_key content might
5202 * not correspond to the record that we are
5203 * trying to create, especially when the extents
5204 * overflow btree is empty. So we reinitialize
5205 * the extents_key again always.
5207 extents_key
->keyLength
= kHFSPlusExtentKeyMaximumLength
;
5208 extents_key
->forkType
= extent_info
->forkType
;
5209 extents_key
->fileID
= extent_info
->fileID
;
5211 /* Initialize the new extent record */
5212 reclen
= sizeof(HFSPlusExtentRecord
);
5215 /* The overflow extent entry from previous record will be
5216 * the first entry in this extent record. If the last
5217 * extent entry in this record is valid, it will be shifted
5218 * into the following extent record as its first entry. So
5219 * save the last entry before shifting entries in current
5222 last_extent
= extents
[kHFSPlusExtentDensity
-1];
5224 /* Shift all entries by one index towards the end */
5225 for (i
= kHFSPlusExtentDensity
-2; i
>= 0; i
--) {
5226 extents
[i
+1] = extents
[i
];
5229 /* Overflow extent entry saved from previous record
5230 * is now the first entry in the current record.
5232 extents
[0] = shift_extent
;
5234 if (hfs_resize_debug
) {
5235 printf ("hfs_split_extent: Shift overflow=(%u,%u) to record with updated startBlock=%u\n", shift_extent
.startBlock
, shift_extent
.blockCount
, write_recStartBlock
);
5238 /* The last entry from current record will be the
5239 * overflow entry which will be the first entry for
5240 * the following extent record.
5242 shift_extent
= last_extent
;
5244 /* Since the key->startBlock is being changed for this record,
5245 * it should be deleted and inserted with the new key.
5247 error
= BTDeleteRecord(extent_info
->fcb
, &iterator
);
5249 printf ("hfs_split_extent: fileID=%u startBlock=%u BTDeleteRecord error=%d\n", extent_info
->fileID
, read_recStartBlock
, error
);
5252 if (hfs_resize_debug
) {
5253 printf ("hfs_split_extent: Deleted record with startBlock=%u\n", (is_xattr
? xattr_key
->startBlock
: extents_key
->startBlock
));
5257 /* Insert the newly created or modified extent record */
5258 bzero(&iterator
.hint
, sizeof(iterator
.hint
));
5260 xattr_key
->startBlock
= write_recStartBlock
;
5262 extents_key
->startBlock
= write_recStartBlock
;
5264 error
= BTInsertRecord(extent_info
->fcb
, &iterator
, &btdata
, reclen
);
5266 printf ("hfs_split_extent: fileID=%u, startBlock=%u BTInsertRecord error=%d\n", extent_info
->fileID
, write_recStartBlock
, error
);
5269 if (hfs_resize_debug
) {
5270 printf ("hfs_split_extent: Inserted extent record with startBlock=%u\n", write_recStartBlock
);
5273 BTFlushPath(extent_info
->fcb
);
5276 FREE (extents_rec
, M_TEMP
);
5279 FREE (xattr_rec
, M_TEMP
);
5286 * Relocate an extent if it lies beyond the expected end of volume.
5288 * This function is called for every extent of the file being relocated.
5289 * It allocates space for relocation, copies the data, deallocates
5290 * the old extent, and update corresponding on-disk extent. If the function
5291 * does not find contiguous space to relocate an extent, it splits the
5292 * extent in smaller size to be able to relocate it out of the area of
5293 * disk being reclaimed. As an optimization, if an extent lies partially
5294 * in the area of the disk being reclaimed, it is split so that we only
5295 * have to relocate the area that was overlapping with the area of disk
5298 * Note that every extent is relocated in its own transaction so that
5299 * they do not overwhelm the journal. This function handles the extent
5300 * record that exists in the catalog record, extent record from overflow
5301 * extents btree, and extents for large EAs.
5304 * extent_info - This is the structure that contains state about
5305 * the current file, extent, and extent record that
5306 * is being relocated. This structure is shared
5307 * among code that traverses through all the extents
5308 * of the file, code that relocates extents, and
5309 * code that splits the extent.
5312 hfs_reclaim_extent(struct hfsmount
*hfsmp
, const u_long allocLimit
, struct hfs_reclaim_extent_info
*extent_info
, vfs_context_t context
)
5317 u_int32_t oldStartBlock
;
5318 u_int32_t oldBlockCount
;
5319 u_int32_t newStartBlock
;
5320 u_int32_t newBlockCount
;
5321 u_int32_t roundedBlockCount
;
5323 uint32_t remainder_blocks
;
5324 u_int32_t alloc_flags
;
5325 int blocks_allocated
= false;
5327 index
= extent_info
->extent_index
;
5328 cp
= VTOC(extent_info
->vp
);
5330 oldStartBlock
= extent_info
->extents
[index
].startBlock
;
5331 oldBlockCount
= extent_info
->extents
[index
].blockCount
;
5333 if (0 && hfs_resize_debug
) {
5334 printf ("hfs_reclaim_extent: Examine record:%u recStartBlock=%u, %u:(%u,%u)\n", extent_info
->overflow_count
, extent_info
->recStartBlock
, index
, oldStartBlock
, oldBlockCount
);
5337 /* If the current extent lies completely within allocLimit,
5338 * it does not require any relocation.
5340 if ((oldStartBlock
+ oldBlockCount
) <= allocLimit
) {
5341 extent_info
->cur_blockCount
+= oldBlockCount
;
5345 /* Every extent should be relocated in its own transaction
5346 * to make sure that we don't overflow the journal buffer.
5348 error
= hfs_start_transaction(hfsmp
);
5352 extent_info
->lockflags
= hfs_systemfile_lock(hfsmp
, extent_info
->lockflags
, HFS_EXCLUSIVE_LOCK
);
5354 /* Check if the extent lies partially in the area to reclaim,
5355 * i.e. it starts before allocLimit and ends beyond allocLimit.
5356 * We have already skipped extents that lie completely within
5357 * allocLimit in the check above, so we only check for the
5358 * startBlock. If it lies partially, split it so that we
5359 * only relocate part of the extent.
5361 if (oldStartBlock
< allocLimit
) {
5362 newBlockCount
= allocLimit
- oldStartBlock
;
5364 /* If the extent belongs to a btree, check and trim
5365 * it to be multiple of the node size.
5367 if (extent_info
->is_sysfile
) {
5368 node_size
= get_btree_nodesize(extent_info
->vp
);
5369 /* If the btree node size is less than the block size,
5370 * splitting this extent will not split a node across
5371 * different extents. So we only check and trim if
5372 * node size is more than the allocation block size.
5374 if (node_size
> hfsmp
->blockSize
) {
5375 remainder_blocks
= newBlockCount
% (node_size
/ hfsmp
->blockSize
);
5376 if (remainder_blocks
) {
5377 newBlockCount
-= remainder_blocks
;
5378 if (hfs_resize_debug
) {
5379 printf ("hfs_reclaim_extent: Fixing extent block count, node_blks=%u, old=%u, new=%u\n", node_size
/hfsmp
->blockSize
, newBlockCount
+ remainder_blocks
, newBlockCount
);
5385 if (hfs_resize_debug
) {
5386 int idx
= extent_info
->extent_index
;
5387 printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx
, extent_info
->extents
[idx
].startBlock
, extent_info
->extents
[idx
].blockCount
, newBlockCount
);
5390 /* Split the extents into two parts --- the first extent lies
5391 * completely within allocLimit and therefore does not require
5392 * relocation. The second extent will require relocation which
5393 * will be handled when the caller calls this function again
5394 * for the next extent.
5396 error
= hfs_split_extent(extent_info
, newBlockCount
);
5398 /* Split success, no relocation required */
5401 /* Split failed, so try to relocate entire extent */
5402 if (hfs_resize_debug
) {
5403 printf ("hfs_reclaim_extent: Split straddling extent failed, reclocate full extent\n");
5407 /* At this point, the current extent requires relocation.
5408 * We will try to allocate space equal to the size of the extent
5409 * being relocated first to try to relocate it without splitting.
5410 * If the allocation fails, we will try to allocate contiguous
5411 * blocks out of metadata zone. If that allocation also fails,
5412 * then we will take a whatever contiguous block run is returned
5413 * by the allocation, split the extent into two parts, and then
5414 * relocate the first splitted extent.
5416 alloc_flags
= HFS_ALLOC_FORCECONTIG
| HFS_ALLOC_SKIPFREEBLKS
;
5417 if (extent_info
->is_sysfile
) {
5418 alloc_flags
|= HFS_ALLOC_METAZONE
;
5421 error
= BlockAllocate(hfsmp
, 1, oldBlockCount
, oldBlockCount
, alloc_flags
,
5422 &newStartBlock
, &newBlockCount
);
5423 if ((extent_info
->is_sysfile
== false) &&
5424 ((error
== dskFulErr
) || (error
== ENOSPC
))) {
5425 /* For non-system files, try reallocating space in metadata zone */
5426 alloc_flags
|= HFS_ALLOC_METAZONE
;
5427 error
= BlockAllocate(hfsmp
, 1, oldBlockCount
, oldBlockCount
,
5428 alloc_flags
, &newStartBlock
, &newBlockCount
);
5430 if ((error
== dskFulErr
) || (error
== ENOSPC
)) {
5431 /* We did not find desired contiguous space for this extent.
5432 * So try to allocate the maximum contiguous space available.
5434 alloc_flags
&= ~HFS_ALLOC_FORCECONTIG
;
5436 error
= BlockAllocate(hfsmp
, 1, oldBlockCount
, oldBlockCount
,
5437 alloc_flags
, &newStartBlock
, &newBlockCount
);
5439 printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) BlockAllocate error=%d\n", extent_info
->fileID
, extent_info
->recStartBlock
, index
, oldStartBlock
, oldBlockCount
, error
);
5442 blocks_allocated
= true;
5444 /* The number of blocks allocated is less than the requested
5445 * number of blocks. For btree extents, check and trim the
5446 * extent to be multiple of the node size.
5448 if (extent_info
->is_sysfile
) {
5449 node_size
= get_btree_nodesize(extent_info
->vp
);
5450 if (node_size
> hfsmp
->blockSize
) {
5451 remainder_blocks
= newBlockCount
% (node_size
/ hfsmp
->blockSize
);
5452 if (remainder_blocks
) {
5453 roundedBlockCount
= newBlockCount
- remainder_blocks
;
5454 /* Free tail-end blocks of the newly allocated extent */
5455 BlockDeallocate(hfsmp
, newStartBlock
+ roundedBlockCount
,
5456 newBlockCount
- roundedBlockCount
,
5457 HFS_ALLOC_SKIPFREEBLKS
);
5458 newBlockCount
= roundedBlockCount
;
5459 if (hfs_resize_debug
) {
5460 printf ("hfs_reclaim_extent: Fixing extent block count, node_blks=%u, old=%u, new=%u\n", node_size
/hfsmp
->blockSize
, newBlockCount
+ remainder_blocks
, newBlockCount
);
5462 if (newBlockCount
== 0) {
5463 printf ("hfs_reclaim_extent: Not enough contiguous blocks available to relocate fileID=%d\n", extent_info
->fileID
);
5471 /* The number of blocks allocated is less than the number of
5472 * blocks requested, so split this extent --- the first extent
5473 * will be relocated as part of this function call and the caller
5474 * will handle relocating the second extent by calling this
5475 * function again for the second extent.
5477 error
= hfs_split_extent(extent_info
, newBlockCount
);
5479 printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) split error=%d\n", extent_info
->fileID
, extent_info
->recStartBlock
, index
, oldStartBlock
, oldBlockCount
, error
);
5482 oldBlockCount
= newBlockCount
;
5485 printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) contig BlockAllocate error=%d\n", extent_info
->fileID
, extent_info
->recStartBlock
, index
, oldStartBlock
, oldBlockCount
, error
);
5488 blocks_allocated
= true;
5490 /* Copy data from old location to new location */
5491 error
= hfs_copy_extent(hfsmp
, extent_info
->vp
, oldStartBlock
,
5492 newStartBlock
, newBlockCount
, context
);
5494 printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u)=>(%u,%u) hfs_copy_extent error=%d\n", extent_info
->fileID
, extent_info
->recStartBlock
, index
, oldStartBlock
, oldBlockCount
, newStartBlock
, newBlockCount
, error
);
5498 /* Update the extent record with the new start block information */
5499 extent_info
->extents
[index
].startBlock
= newStartBlock
;
5501 /* Sync the content back to the disk */
5502 if (extent_info
->catalog_fp
) {
5503 /* Update the extents in catalog record */
5504 if (extent_info
->is_dirlink
) {
5505 error
= cat_update_dirlink(hfsmp
, extent_info
->forkType
,
5506 extent_info
->dirlink_desc
, extent_info
->dirlink_attr
,
5507 &(extent_info
->dirlink_fork
->ff_data
));
5509 cp
->c_flag
|= C_MODIFIED
;
5510 /* If this is a system file, sync volume headers on disk */
5511 if (extent_info
->is_sysfile
) {
5512 error
= hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, HFS_ALTFLUSH
);
5516 /* Replace record for extents overflow or extents-based xattrs */
5517 error
= BTReplaceRecord(extent_info
->fcb
, extent_info
->iterator
,
5518 &(extent_info
->btdata
), extent_info
->recordlen
);
5521 printf ("hfs_reclaim_extent: fileID=%u, update record error=%u\n", extent_info
->fileID
, error
);
5525 /* Deallocate the old extent */
5526 error
= BlockDeallocate(hfsmp
, oldStartBlock
, oldBlockCount
, HFS_ALLOC_SKIPFREEBLKS
);
5528 printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) BlockDeallocate error=%d\n", extent_info
->fileID
, extent_info
->recStartBlock
, index
, oldStartBlock
, oldBlockCount
, error
);
5531 extent_info
->blocks_relocated
+= newBlockCount
;
5533 if (hfs_resize_debug
) {
5534 printf ("hfs_reclaim_extent: Relocated record:%u %u:(%u,%u) to (%u,%u)\n", extent_info
->overflow_count
, index
, oldStartBlock
, oldBlockCount
, newStartBlock
, newBlockCount
);
5539 if (blocks_allocated
== true) {
5540 BlockDeallocate(hfsmp
, newStartBlock
, newBlockCount
, HFS_ALLOC_SKIPFREEBLKS
);
5543 /* On success, increment the total allocation blocks processed */
5544 extent_info
->cur_blockCount
+= newBlockCount
;
5547 hfs_systemfile_unlock(hfsmp
, extent_info
->lockflags
);
5549 /* For a non-system file, if an extent entry from catalog record
5550 * was modified, sync the in-memory changes to the catalog record
5551 * on disk before ending the transaction.
5553 if ((extent_info
->catalog_fp
) &&
5554 (extent_info
->is_sysfile
== false)) {
5555 (void) hfs_update(extent_info
->vp
, MNT_WAIT
);
5558 hfs_end_transaction(hfsmp
);
5563 /* Report intermediate progress during volume resize */
5565 hfs_truncatefs_progress(struct hfsmount
*hfsmp
)
5567 u_int32_t cur_progress
;
5569 hfs_resize_progress(hfsmp
, &cur_progress
);
5570 if (cur_progress
> (hfsmp
->hfs_resize_progress
+ 9)) {
5571 printf("hfs_truncatefs: %d%% done...\n", cur_progress
);
5572 hfsmp
->hfs_resize_progress
= cur_progress
;
5578 * Reclaim space at the end of a volume for given file and forktype.
5580 * This routine attempts to move any extent which contains allocation blocks
5581 * at or after "allocLimit." A separate transaction is used for every extent
5582 * that needs to be moved. If there is not contiguous space available for
5583 * moving an extent, it can be split into smaller extents. The contents of
5584 * any moved extents are read and written via the volume's device vnode --
5585 * NOT via "vp." During the move, moved blocks which are part of a transaction
5586 * have their physical block numbers invalidated so they will eventually be
5587 * written to their new locations.
5589 * This function is also called for directory hard links. Directory hard links
5590 * are regular files with no data fork and resource fork that contains alias
5591 * information for backward compatibility with pre-Leopard systems. However
5592 * non-Mac OS X implementation can add/modify data fork or resource fork
5593 * information to directory hard links, so we check, and if required, relocate
5594 * both data fork and resource fork.
5597 * hfsmp The volume being resized.
5598 * vp The vnode for the system file.
5599 * fileID ID of the catalog record that needs to be relocated
5600 * forktype The type of fork that needs to be relocated,
5601 * kHFSResourceForkType for resource fork,
5602 * kHFSDataForkType for data fork
5603 * allocLimit Allocation limit for the new volume size,
5604 * do not use this block or beyond. All extents
5605 * that use this block or any blocks beyond this limit
5606 * will be relocated.
5609 * hfsmp->hfs_resize_blocksmoved is incremented by the number of allocation
5610 * blocks that were relocated.
5613 hfs_reclaim_file(struct hfsmount
*hfsmp
, struct vnode
*vp
, u_int32_t fileID
,
5614 u_int8_t forktype
, u_long allocLimit
, vfs_context_t context
)
5617 struct hfs_reclaim_extent_info
*extent_info
;
5621 struct filefork
*fp
;
5622 int took_truncate_lock
= false;
5623 int release_desc
= false;
5624 HFSPlusExtentKey
*key
;
5626 /* If there is no vnode for this file, then there's nothing to do. */
5633 MALLOC(extent_info
, struct hfs_reclaim_extent_info
*,
5634 sizeof(struct hfs_reclaim_extent_info
), M_TEMP
, M_WAITOK
);
5635 if (extent_info
== NULL
) {
5638 bzero(extent_info
, sizeof(struct hfs_reclaim_extent_info
));
5639 extent_info
->vp
= vp
;
5640 extent_info
->fileID
= fileID
;
5641 extent_info
->forkType
= forktype
;
5642 extent_info
->is_sysfile
= vnode_issystem(vp
);
5643 if (vnode_isdir(vp
) && (cp
->c_flag
& C_HARDLINK
)) {
5644 extent_info
->is_dirlink
= true;
5646 /* We always need allocation bitmap and extent btree lock */
5647 lockflags
= SFL_BITMAP
| SFL_EXTENTS
;
5648 if ((fileID
== kHFSCatalogFileID
) || (extent_info
->is_dirlink
== true)) {
5649 lockflags
|= SFL_CATALOG
;
5650 } else if (fileID
== kHFSAttributesFileID
) {
5651 lockflags
|= SFL_ATTRIBUTE
;
5652 } else if (fileID
== kHFSStartupFileID
) {
5653 lockflags
|= SFL_STARTUP
;
5655 extent_info
->lockflags
= lockflags
;
5656 extent_info
->fcb
= VTOF(hfsmp
->hfs_extents_vp
);
5658 /* Flush data associated with current file on disk.
5660 * If the current vnode is directory hard link, no flushing of
5661 * journal or vnode is required. The current kernel does not
5662 * modify data/resource fork of directory hard links, so nothing
5663 * will be in the cache. If a directory hard link is newly created,
5664 * the resource fork data is written directly using devvp and
5665 * the code that actually relocates data (hfs_copy_extent()) also
5666 * uses devvp for its I/O --- so they will see a consistent copy.
5668 if (extent_info
->is_sysfile
) {
5669 /* If the current vnode is system vnode, flush journal
5670 * to make sure that all data is written to the disk.
5672 error
= hfs_journal_flush(hfsmp
, TRUE
);
5674 printf ("hfs_reclaim_file: journal_flush returned %d\n", error
);
5677 } else if (extent_info
->is_dirlink
== false) {
5678 /* Flush all blocks associated with this regular file vnode.
5679 * Normally there should not be buffer cache blocks for regular
5680 * files, but for objects like symlinks, we can have buffer cache
5681 * blocks associated with the vnode. Therefore we call
5682 * buf_flushdirtyblks() also.
5684 buf_flushdirtyblks(vp
, 0, BUF_SKIP_LOCKED
, "hfs_reclaim_file");
5687 hfs_lock_truncate(cp
, HFS_EXCLUSIVE_LOCK
);
5688 took_truncate_lock
= true;
5689 (void) cluster_push(vp
, 0);
5690 error
= hfs_lock(cp
, HFS_FORCE_LOCK
);
5695 /* If the file no longer exists, nothing left to do */
5696 if (cp
->c_flag
& C_NOEXISTS
) {
5701 /* Wait for any in-progress writes to this vnode to complete, so that we'll
5702 * be copying consistent bits. (Otherwise, it's possible that an async
5703 * write will complete to the old extent after we read from it. That
5704 * could lead to corruption.)
5706 error
= vnode_waitforwrites(vp
, 0, 0, 0, "hfs_reclaim_file");
5712 if (hfs_resize_debug
) {
5713 printf("hfs_reclaim_file: === Start reclaiming %sfork for %sid=%u ===\n", (forktype
? "rsrc" : "data"), (extent_info
->is_dirlink
? "dirlink" : "file"), fileID
);
5716 if (extent_info
->is_dirlink
) {
5717 MALLOC(extent_info
->dirlink_desc
, struct cat_desc
*,
5718 sizeof(struct cat_desc
), M_TEMP
, M_WAITOK
);
5719 MALLOC(extent_info
->dirlink_attr
, struct cat_attr
*,
5720 sizeof(struct cat_attr
), M_TEMP
, M_WAITOK
);
5721 MALLOC(extent_info
->dirlink_fork
, struct filefork
*,
5722 sizeof(struct filefork
), M_TEMP
, M_WAITOK
);
5723 if ((extent_info
->dirlink_desc
== NULL
) ||
5724 (extent_info
->dirlink_attr
== NULL
) ||
5725 (extent_info
->dirlink_fork
== NULL
)) {
5730 /* Lookup catalog record for directory hard link and
5731 * create a fake filefork for the value looked up from
5734 fp
= extent_info
->dirlink_fork
;
5735 bzero(extent_info
->dirlink_fork
, sizeof(struct filefork
));
5736 extent_info
->dirlink_fork
->ff_cp
= cp
;
5737 lockflags
= hfs_systemfile_lock(hfsmp
, lockflags
, HFS_EXCLUSIVE_LOCK
);
5738 error
= cat_lookup_dirlink(hfsmp
, fileID
, forktype
,
5739 extent_info
->dirlink_desc
, extent_info
->dirlink_attr
,
5740 &(extent_info
->dirlink_fork
->ff_data
));
5741 hfs_systemfile_unlock(hfsmp
, lockflags
);
5743 printf ("hfs_reclaim_file: cat_lookup_dirlink for fileID=%u returned error=%u\n", fileID
, error
);
5746 release_desc
= true;
5751 extent_info
->catalog_fp
= fp
;
5752 extent_info
->recStartBlock
= 0;
5753 extent_info
->extents
= extent_info
->catalog_fp
->ff_extents
;
5754 /* Relocate extents from the catalog record */
5755 for (i
= 0; i
< kHFSPlusExtentDensity
; ++i
) {
5756 if (fp
->ff_extents
[i
].blockCount
== 0) {
5759 extent_info
->extent_index
= i
;
5760 error
= hfs_reclaim_extent(hfsmp
, allocLimit
, extent_info
, context
);
5762 printf ("hfs_reclaim_file: fileID=%u #%d %u:(%u,%u) hfs_reclaim_extent error=%d\n", fileID
, extent_info
->overflow_count
, i
, fp
->ff_extents
[i
].startBlock
, fp
->ff_extents
[i
].blockCount
, error
);
5767 /* If the number of allocation blocks processed for reclaiming
5768 * are less than total number of blocks for the file, continuing
5769 * working on overflow extents record.
5771 if (fp
->ff_blocks
<= extent_info
->cur_blockCount
) {
5772 if (0 && hfs_resize_debug
) {
5773 printf ("hfs_reclaim_file: Nothing more to relocate, offset=%d, ff_blocks=%u, cur_blockCount=%u\n", i
, fp
->ff_blocks
, extent_info
->cur_blockCount
);
5778 if (hfs_resize_debug
) {
5779 printf ("hfs_reclaim_file: Will check overflow records, offset=%d, ff_blocks=%u, cur_blockCount=%u\n", i
, fp
->ff_blocks
, extent_info
->cur_blockCount
);
5782 MALLOC(extent_info
->iterator
, struct BTreeIterator
*, sizeof(struct BTreeIterator
), M_TEMP
, M_WAITOK
);
5783 if (extent_info
->iterator
== NULL
) {
5787 bzero(extent_info
->iterator
, sizeof(struct BTreeIterator
));
5788 key
= (HFSPlusExtentKey
*) &(extent_info
->iterator
->key
);
5789 key
->keyLength
= kHFSPlusExtentKeyMaximumLength
;
5790 key
->forkType
= forktype
;
5791 key
->fileID
= fileID
;
5792 key
->startBlock
= extent_info
->cur_blockCount
;
5794 extent_info
->btdata
.bufferAddress
= extent_info
->record
.overflow
;
5795 extent_info
->btdata
.itemSize
= sizeof(HFSPlusExtentRecord
);
5796 extent_info
->btdata
.itemCount
= 1;
5798 extent_info
->catalog_fp
= NULL
;
5800 /* Search the first overflow extent with expected startBlock as 'cur_blockCount' */
5801 lockflags
= hfs_systemfile_lock(hfsmp
, lockflags
, HFS_EXCLUSIVE_LOCK
);
5802 error
= BTSearchRecord(extent_info
->fcb
, extent_info
->iterator
,
5803 &(extent_info
->btdata
), &(extent_info
->recordlen
),
5804 extent_info
->iterator
);
5805 hfs_systemfile_unlock(hfsmp
, lockflags
);
5806 while (error
== 0) {
5807 extent_info
->overflow_count
++;
5808 extent_info
->recStartBlock
= key
->startBlock
;
5809 extent_info
->extents
= extent_info
->record
.overflow
;
5810 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
5811 if (extent_info
->record
.overflow
[i
].blockCount
== 0) {
5814 extent_info
->extent_index
= i
;
5815 error
= hfs_reclaim_extent(hfsmp
, allocLimit
, extent_info
, context
);
5817 printf ("hfs_reclaim_file: fileID=%u #%d %u:(%u,%u) hfs_reclaim_extent error=%d\n", fileID
, extent_info
->overflow_count
, i
, extent_info
->record
.overflow
[i
].startBlock
, extent_info
->record
.overflow
[i
].blockCount
, error
);
5822 /* Look for more overflow records */
5823 lockflags
= hfs_systemfile_lock(hfsmp
, lockflags
, HFS_EXCLUSIVE_LOCK
);
5824 error
= BTIterateRecord(extent_info
->fcb
, kBTreeNextRecord
,
5825 extent_info
->iterator
, &(extent_info
->btdata
),
5826 &(extent_info
->recordlen
));
5827 hfs_systemfile_unlock(hfsmp
, lockflags
);
5831 /* Stop when we encounter a different file or fork. */
5832 if ((key
->fileID
!= fileID
) || (key
->forkType
!= forktype
)) {
5836 if (error
== fsBTRecordNotFoundErr
|| error
== fsBTEndOfIterationErr
) {
5841 /* If any blocks were relocated, account them and report progress */
5842 if (extent_info
->blocks_relocated
) {
5843 hfsmp
->hfs_resize_blocksmoved
+= extent_info
->blocks_relocated
;
5844 hfs_truncatefs_progress(hfsmp
);
5845 if (fileID
< kHFSFirstUserCatalogNodeID
) {
5846 printf ("hfs_reclaim_file: Relocated %u blocks from fileID=%u on \"%s\"\n",
5847 extent_info
->blocks_relocated
, fileID
, hfsmp
->vcbVN
);
5850 if (extent_info
->iterator
) {
5851 FREE(extent_info
->iterator
, M_TEMP
);
5853 if (release_desc
== true) {
5854 cat_releasedesc(extent_info
->dirlink_desc
);
5856 if (extent_info
->dirlink_desc
) {
5857 FREE(extent_info
->dirlink_desc
, M_TEMP
);
5859 if (extent_info
->dirlink_attr
) {
5860 FREE(extent_info
->dirlink_attr
, M_TEMP
);
5862 if (extent_info
->dirlink_fork
) {
5863 FREE(extent_info
->dirlink_fork
, M_TEMP
);
5865 if ((extent_info
->blocks_relocated
!= 0) && (extent_info
->is_sysfile
== false)) {
5866 (void) hfs_update(vp
, MNT_WAIT
);
5868 if (took_truncate_lock
) {
5869 hfs_unlock_truncate(cp
, 0);
5872 FREE(extent_info
, M_TEMP
);
5874 if (hfs_resize_debug
) {
5875 printf("hfs_reclaim_file: === Finished relocating %sfork for fileid=%u (error=%d) ===\n", (forktype
? "rsrc" : "data"), fileID
, error
);
5883 * This journal_relocate callback updates the journal info block to point
5884 * at the new journal location. This write must NOT be done using the
5885 * transaction. We must write the block immediately. We must also force
5886 * it to get to the media so that the new journal location will be seen by
5887 * the replay code before we can safely let journaled blocks be written
5888 * to their normal locations.
5890 * The tests for journal_uses_fua below are mildly hacky. Since the journal
5891 * and the file system are both on the same device, I'm leveraging what
5892 * the journal has decided about FUA.
5894 struct hfs_journal_relocate_args
{
5895 struct hfsmount
*hfsmp
;
5896 vfs_context_t context
;
5897 u_int32_t newStartBlock
;
5901 hfs_journal_relocate_callback(void *_args
)
5904 struct hfs_journal_relocate_args
*args
= _args
;
5905 struct hfsmount
*hfsmp
= args
->hfsmp
;
5907 JournalInfoBlock
*jibp
;
5909 error
= buf_meta_bread(hfsmp
->hfs_devvp
,
5910 hfsmp
->vcbJinfoBlock
* (hfsmp
->blockSize
/hfsmp
->hfs_logical_block_size
),
5911 hfsmp
->blockSize
, vfs_context_ucred(args
->context
), &bp
);
5913 printf("hfs_reclaim_journal_file: failed to read JIB (%d)\n", error
);
5919 jibp
= (JournalInfoBlock
*) buf_dataptr(bp
);
5920 jibp
->offset
= SWAP_BE64((u_int64_t
)args
->newStartBlock
* hfsmp
->blockSize
);
5921 jibp
->size
= SWAP_BE64(hfsmp
->jnl_size
);
5922 if (journal_uses_fua(hfsmp
->jnl
))
5924 error
= buf_bwrite(bp
);
5926 printf("hfs_reclaim_journal_file: failed to write JIB (%d)\n", error
);
5929 if (!journal_uses_fua(hfsmp
->jnl
)) {
5930 error
= VNOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, args
->context
);
5932 printf("hfs_reclaim_journal_file: DKIOCSYNCHRONIZECACHE failed (%d)\n", error
);
5933 error
= 0; /* Don't fail the operation. */
5942 hfs_reclaim_journal_file(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, vfs_context_t context
)
5947 u_int32_t oldStartBlock
;
5948 u_int32_t newStartBlock
;
5949 u_int32_t oldBlockCount
;
5950 u_int32_t newBlockCount
;
5951 struct cat_desc journal_desc
;
5952 struct cat_attr journal_attr
;
5953 struct cat_fork journal_fork
;
5954 struct hfs_journal_relocate_args callback_args
;
5956 if (hfsmp
->jnl_start
+ (hfsmp
->jnl_size
/ hfsmp
->blockSize
) <= allocLimit
) {
5957 /* The journal does not require relocation */
5961 error
= hfs_start_transaction(hfsmp
);
5963 printf("hfs_reclaim_journal_file: hfs_start_transaction returned %d\n", error
);
5966 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
5968 oldBlockCount
= hfsmp
->jnl_size
/ hfsmp
->blockSize
;
5970 /* TODO: Allow the journal to change size based on the new volume size. */
5971 error
= BlockAllocate(hfsmp
, 1, oldBlockCount
, oldBlockCount
,
5972 HFS_ALLOC_METAZONE
| HFS_ALLOC_FORCECONTIG
| HFS_ALLOC_SKIPFREEBLKS
,
5973 &newStartBlock
, &newBlockCount
);
5975 printf("hfs_reclaim_journal_file: BlockAllocate returned %d\n", error
);
5978 if (newBlockCount
!= oldBlockCount
) {
5979 printf("hfs_reclaim_journal_file: newBlockCount != oldBlockCount (%u, %u)\n", newBlockCount
, oldBlockCount
);
5983 error
= BlockDeallocate(hfsmp
, hfsmp
->jnl_start
, oldBlockCount
, HFS_ALLOC_SKIPFREEBLKS
);
5985 printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error
);
5989 /* Update the catalog record for .journal */
5990 error
= cat_idlookup(hfsmp
, hfsmp
->hfs_jnlfileid
, 1, &journal_desc
, &journal_attr
, &journal_fork
);
5992 printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error
);
5995 oldStartBlock
= journal_fork
.cf_extents
[0].startBlock
;
5996 journal_fork
.cf_size
= newBlockCount
* hfsmp
->blockSize
;
5997 journal_fork
.cf_extents
[0].startBlock
= newStartBlock
;
5998 journal_fork
.cf_extents
[0].blockCount
= newBlockCount
;
5999 journal_fork
.cf_blocks
= newBlockCount
;
6000 error
= cat_update(hfsmp
, &journal_desc
, &journal_attr
, &journal_fork
, NULL
);
6001 cat_releasedesc(&journal_desc
); /* all done with cat descriptor */
6003 printf("hfs_reclaim_journal_file: cat_update returned %d\n", error
);
6006 callback_args
.hfsmp
= hfsmp
;
6007 callback_args
.context
= context
;
6008 callback_args
.newStartBlock
= newStartBlock
;
6010 error
= journal_relocate(hfsmp
->jnl
, (off_t
)newStartBlock
*hfsmp
->blockSize
,
6011 (off_t
)newBlockCount
*hfsmp
->blockSize
, 0,
6012 hfs_journal_relocate_callback
, &callback_args
);
6014 /* NOTE: journal_relocate will mark the journal invalid. */
6015 printf("hfs_reclaim_journal_file: journal_relocate returned %d\n", error
);
6018 hfsmp
->jnl_start
= newStartBlock
;
6019 hfsmp
->jnl_size
= (off_t
)newBlockCount
* hfsmp
->blockSize
;
6021 hfs_systemfile_unlock(hfsmp
, lockflags
);
6022 error
= hfs_end_transaction(hfsmp
);
6024 printf("hfs_reclaim_journal_file: hfs_end_transaction returned %d\n", error
);
6027 /* Account for the blocks relocated and print progress */
6028 hfsmp
->hfs_resize_blocksmoved
+= oldBlockCount
;
6029 hfs_truncatefs_progress(hfsmp
);
6031 printf ("hfs_reclaim_journal_file: Relocated %u blocks from journal on \"%s\"\n",
6032 oldBlockCount
, hfsmp
->vcbVN
);
6033 if (hfs_resize_debug
) {
6034 printf ("hfs_reclaim_journal_file: Successfully relocated journal from (%u,%u) to (%u,%u)\n", oldStartBlock
, oldBlockCount
, newStartBlock
, newBlockCount
);
6040 journal_err
= BlockDeallocate(hfsmp
, newStartBlock
, newBlockCount
, HFS_ALLOC_SKIPFREEBLKS
);
6042 printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error
);
6043 hfs_mark_volume_inconsistent(hfsmp
);
6046 hfs_systemfile_unlock(hfsmp
, lockflags
);
6047 (void) hfs_end_transaction(hfsmp
);
6048 if (hfs_resize_debug
) {
6049 printf ("hfs_reclaim_journal_file: Error relocating journal file (error=%d)\n", error
);
6056 * Move the journal info block to a new location. We have to make sure the
6057 * new copy of the journal info block gets to the media first, then change
6058 * the field in the volume header and the catalog record.
6061 hfs_reclaim_journal_info_block(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, vfs_context_t context
)
6068 u_int32_t blockCount
;
6069 struct cat_desc jib_desc
;
6070 struct cat_attr jib_attr
;
6071 struct cat_fork jib_fork
;
6072 buf_t old_bp
, new_bp
;
6074 if (hfsmp
->vcbJinfoBlock
<= allocLimit
) {
6075 /* The journal info block does not require relocation */
6079 error
= hfs_start_transaction(hfsmp
);
6081 printf("hfs_reclaim_journal_info_block: hfs_start_transaction returned %d\n", error
);
6084 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
6086 error
= BlockAllocate(hfsmp
, 1, 1, 1,
6087 HFS_ALLOC_METAZONE
| HFS_ALLOC_FORCECONTIG
| HFS_ALLOC_SKIPFREEBLKS
,
6088 &newBlock
, &blockCount
);
6090 printf("hfs_reclaim_journal_info_block: BlockAllocate returned %d\n", error
);
6093 if (blockCount
!= 1) {
6094 printf("hfs_reclaim_journal_info_block: blockCount != 1 (%u)\n", blockCount
);
6097 error
= BlockDeallocate(hfsmp
, hfsmp
->vcbJinfoBlock
, 1, HFS_ALLOC_SKIPFREEBLKS
);
6099 printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error
);
6103 /* Copy the old journal info block content to the new location */
6104 error
= buf_meta_bread(hfsmp
->hfs_devvp
,
6105 hfsmp
->vcbJinfoBlock
* (hfsmp
->blockSize
/hfsmp
->hfs_logical_block_size
),
6106 hfsmp
->blockSize
, vfs_context_ucred(context
), &old_bp
);
6108 printf("hfs_reclaim_journal_info_block: failed to read JIB (%d)\n", error
);
6114 new_bp
= buf_getblk(hfsmp
->hfs_devvp
,
6115 newBlock
* (hfsmp
->blockSize
/hfsmp
->hfs_logical_block_size
),
6116 hfsmp
->blockSize
, 0, 0, BLK_META
);
6117 bcopy((char*)buf_dataptr(old_bp
), (char*)buf_dataptr(new_bp
), hfsmp
->blockSize
);
6119 if (journal_uses_fua(hfsmp
->jnl
))
6120 buf_markfua(new_bp
);
6121 error
= buf_bwrite(new_bp
);
6123 printf("hfs_reclaim_journal_info_block: failed to write new JIB (%d)\n", error
);
6126 if (!journal_uses_fua(hfsmp
->jnl
)) {
6127 error
= VNOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, context
);
6129 printf("hfs_reclaim_journal_info_block: DKIOCSYNCHRONIZECACHE failed (%d)\n", error
);
6130 /* Don't fail the operation. */
6134 /* Update the catalog record for .journal_info_block */
6135 error
= cat_idlookup(hfsmp
, hfsmp
->hfs_jnlinfoblkid
, 1, &jib_desc
, &jib_attr
, &jib_fork
);
6137 printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error
);
6140 oldBlock
= jib_fork
.cf_extents
[0].startBlock
;
6141 jib_fork
.cf_size
= hfsmp
->blockSize
;
6142 jib_fork
.cf_extents
[0].startBlock
= newBlock
;
6143 jib_fork
.cf_extents
[0].blockCount
= 1;
6144 jib_fork
.cf_blocks
= 1;
6145 error
= cat_update(hfsmp
, &jib_desc
, &jib_attr
, &jib_fork
, NULL
);
6146 cat_releasedesc(&jib_desc
); /* all done with cat descriptor */
6148 printf("hfs_reclaim_journal_info_block: cat_update returned %d\n", error
);
6152 /* Update the pointer to the journal info block in the volume header. */
6153 hfsmp
->vcbJinfoBlock
= newBlock
;
6154 error
= hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, HFS_ALTFLUSH
);
6156 printf("hfs_reclaim_journal_info_block: hfs_flushvolumeheader returned %d\n", error
);
6159 hfs_systemfile_unlock(hfsmp
, lockflags
);
6160 error
= hfs_end_transaction(hfsmp
);
6162 printf("hfs_reclaim_journal_info_block: hfs_end_transaction returned %d\n", error
);
6164 error
= hfs_journal_flush(hfsmp
, FALSE
);
6166 printf("hfs_reclaim_journal_info_block: journal_flush returned %d\n", error
);
6169 /* Account for the block relocated and print progress */
6170 hfsmp
->hfs_resize_blocksmoved
+= 1;
6171 hfs_truncatefs_progress(hfsmp
);
6173 printf ("hfs_reclaim_journal_info: Relocated 1 block from journal info on \"%s\"\n",
6175 if (hfs_resize_debug
) {
6176 printf ("hfs_reclaim_journal_info_block: Successfully relocated journal info block from (%u,%u) to (%u,%u)\n", oldBlock
, blockCount
, newBlock
, blockCount
);
6182 journal_err
= BlockDeallocate(hfsmp
, newBlock
, blockCount
, HFS_ALLOC_SKIPFREEBLKS
);
6184 printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error
);
6185 hfs_mark_volume_inconsistent(hfsmp
);
6189 hfs_systemfile_unlock(hfsmp
, lockflags
);
6190 (void) hfs_end_transaction(hfsmp
);
6191 if (hfs_resize_debug
) {
6192 printf ("hfs_reclaim_journal_info_block: Error relocating journal info block (error=%d)\n", error
);
6199 * This function traverses through all extended attribute records for a given
6200 * fileID, and calls function that reclaims data blocks that exist in the
6201 * area of the disk being reclaimed which in turn is responsible for allocating
6202 * new space, copying extent data, deallocating new space, and if required,
6203 * splitting the extent.
6205 * Note: The caller has already acquired the cnode lock on the file. Therefore
6206 * we are assured that no other thread would be creating/deleting/modifying
6207 * extended attributes for this file.
6210 * hfsmp->hfs_resize_blocksmoved is incremented by the number of allocation
6211 * blocks that were relocated.
6214 * 0 on success, non-zero on failure.
6217 hfs_reclaim_xattr(struct hfsmount
*hfsmp
, struct vnode
*vp
, u_int32_t fileID
, u_int32_t allocLimit
, vfs_context_t context
)
6220 struct hfs_reclaim_extent_info
*extent_info
;
6222 HFSPlusAttrKey
*key
;
6225 if (hfs_resize_debug
) {
6226 printf("hfs_reclaim_xattr: === Start reclaiming xattr for id=%u ===\n", fileID
);
6229 MALLOC(extent_info
, struct hfs_reclaim_extent_info
*,
6230 sizeof(struct hfs_reclaim_extent_info
), M_TEMP
, M_WAITOK
);
6231 if (extent_info
== NULL
) {
6234 bzero(extent_info
, sizeof(struct hfs_reclaim_extent_info
));
6235 extent_info
->vp
= vp
;
6236 extent_info
->fileID
= fileID
;
6237 extent_info
->is_xattr
= true;
6238 extent_info
->is_sysfile
= vnode_issystem(vp
);
6239 extent_info
->fcb
= VTOF(hfsmp
->hfs_attribute_vp
);
6240 lockflags
= &(extent_info
->lockflags
);
6241 *lockflags
= SFL_ATTRIBUTE
| SFL_BITMAP
;
6243 /* Initialize iterator from the extent_info structure */
6244 MALLOC(extent_info
->iterator
, struct BTreeIterator
*,
6245 sizeof(struct BTreeIterator
), M_TEMP
, M_WAITOK
);
6246 if (extent_info
->iterator
== NULL
) {
6250 bzero(extent_info
->iterator
, sizeof(struct BTreeIterator
));
6252 /* Build attribute key */
6253 key
= (HFSPlusAttrKey
*)&(extent_info
->iterator
->key
);
6254 error
= hfs_buildattrkey(fileID
, NULL
, key
);
6259 /* Initialize btdata from extent_info structure. Note that the
6260 * buffer pointer actually points to the xattr record from the
6261 * extent_info structure itself.
6263 extent_info
->btdata
.bufferAddress
= &(extent_info
->record
.xattr
);
6264 extent_info
->btdata
.itemSize
= sizeof(HFSPlusAttrRecord
);
6265 extent_info
->btdata
.itemCount
= 1;
6268 * Sync all extent-based attribute data to the disk.
6270 * All extent-based attribute data I/O is performed via cluster
6271 * I/O using a virtual file that spans across entire file system
6274 hfs_lock_truncate(VTOC(hfsmp
->hfs_attrdata_vp
), HFS_EXCLUSIVE_LOCK
);
6275 (void)cluster_push(hfsmp
->hfs_attrdata_vp
, 0);
6276 error
= vnode_waitforwrites(hfsmp
->hfs_attrdata_vp
, 0, 0, 0, "hfs_reclaim_xattr");
6277 hfs_unlock_truncate(VTOC(hfsmp
->hfs_attrdata_vp
), 0);
6282 /* Search for extended attribute for current file. This
6283 * will place the iterator before the first matching record.
6285 *lockflags
= hfs_systemfile_lock(hfsmp
, *lockflags
, HFS_EXCLUSIVE_LOCK
);
6286 error
= BTSearchRecord(extent_info
->fcb
, extent_info
->iterator
,
6287 &(extent_info
->btdata
), &(extent_info
->recordlen
),
6288 extent_info
->iterator
);
6289 hfs_systemfile_unlock(hfsmp
, *lockflags
);
6291 if (error
!= btNotFound
) {
6294 /* btNotFound is expected here, so just mask it */
6299 /* Iterate to the next record */
6300 *lockflags
= hfs_systemfile_lock(hfsmp
, *lockflags
, HFS_EXCLUSIVE_LOCK
);
6301 error
= BTIterateRecord(extent_info
->fcb
, kBTreeNextRecord
,
6302 extent_info
->iterator
, &(extent_info
->btdata
),
6303 &(extent_info
->recordlen
));
6304 hfs_systemfile_unlock(hfsmp
, *lockflags
);
6306 /* Stop the iteration if we encounter end of btree or xattr with different fileID */
6307 if (error
|| key
->fileID
!= fileID
) {
6308 if (error
== fsBTRecordNotFoundErr
|| error
== fsBTEndOfIterationErr
) {
6314 /* We only care about extent-based EAs */
6315 if ((extent_info
->record
.xattr
.recordType
!= kHFSPlusAttrForkData
) &&
6316 (extent_info
->record
.xattr
.recordType
!= kHFSPlusAttrExtents
)) {
6320 if (extent_info
->record
.xattr
.recordType
== kHFSPlusAttrForkData
) {
6321 extent_info
->overflow_count
= 0;
6322 extent_info
->extents
= extent_info
->record
.xattr
.forkData
.theFork
.extents
;
6323 } else if (extent_info
->record
.xattr
.recordType
== kHFSPlusAttrExtents
) {
6324 extent_info
->overflow_count
++;
6325 extent_info
->extents
= extent_info
->record
.xattr
.overflowExtents
.extents
;
6328 extent_info
->recStartBlock
= key
->startBlock
;
6329 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
6330 if (extent_info
->extents
[i
].blockCount
== 0) {
6333 extent_info
->extent_index
= i
;
6334 error
= hfs_reclaim_extent(hfsmp
, allocLimit
, extent_info
, context
);
6336 printf ("hfs_reclaim_xattr: fileID=%u hfs_reclaim_extent error=%d\n", fileID
, error
);
6343 /* If any blocks were relocated, account them and report progress */
6344 if (extent_info
->blocks_relocated
) {
6345 hfsmp
->hfs_resize_blocksmoved
+= extent_info
->blocks_relocated
;
6346 hfs_truncatefs_progress(hfsmp
);
6348 if (extent_info
->iterator
) {
6349 FREE(extent_info
->iterator
, M_TEMP
);
6352 FREE(extent_info
, M_TEMP
);
6354 if (hfs_resize_debug
) {
6355 printf("hfs_reclaim_xattr: === Finished relocating xattr for fileid=%u (error=%d) ===\n", fileID
, error
);
6361 * Reclaim any extent-based extended attributes allocation blocks from
6362 * the area of the disk that is being truncated.
6364 * The function traverses the attribute btree to find out the fileIDs
6365 * of the extended attributes that need to be relocated. For every
6366 * file whose large EA requires relocation, it looks up the cnode and
6367 * calls hfs_reclaim_xattr() to do all the work for allocating
6368 * new space, copying data, deallocating old space, and if required,
6369 * splitting the extents.
6372 * allocLimit - starting block of the area being reclaimed
6375 * returns 0 on success, non-zero on failure.
6378 hfs_reclaim_xattrspace(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, vfs_context_t context
)
6382 struct BTreeIterator
*iterator
= NULL
;
6383 struct FSBufferDescriptor btdata
;
6384 HFSPlusAttrKey
*key
;
6385 HFSPlusAttrRecord rec
;
6387 cnid_t prev_fileid
= 0;
6390 int btree_operation
;
6391 u_int32_t files_moved
= 0;
6392 u_int32_t prev_blocksmoved
;
6395 fcb
= VTOF(hfsmp
->hfs_attribute_vp
);
6396 /* Store the value to print total blocks moved by this function in end */
6397 prev_blocksmoved
= hfsmp
->hfs_resize_blocksmoved
;
6399 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&iterator
, sizeof(*iterator
))) {
6402 bzero(iterator
, sizeof(*iterator
));
6403 key
= (HFSPlusAttrKey
*)&iterator
->key
;
6404 btdata
.bufferAddress
= &rec
;
6405 btdata
.itemSize
= sizeof(rec
);
6406 btdata
.itemCount
= 1;
6408 need_relocate
= false;
6409 btree_operation
= kBTreeFirstRecord
;
6410 /* Traverse the attribute btree to find extent-based EAs to reclaim */
6412 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_ATTRIBUTE
, HFS_SHARED_LOCK
);
6413 error
= BTIterateRecord(fcb
, btree_operation
, iterator
, &btdata
, NULL
);
6414 hfs_systemfile_unlock(hfsmp
, lockflags
);
6416 if (error
== fsBTRecordNotFoundErr
|| error
== fsBTEndOfIterationErr
) {
6421 btree_operation
= kBTreeNextRecord
;
6423 /* If the extents of current fileID were already relocated, skip it */
6424 if (prev_fileid
== key
->fileID
) {
6428 /* Check if any of the extents in the current record need to be relocated */
6429 need_relocate
= false;
6430 switch(rec
.recordType
) {
6431 case kHFSPlusAttrForkData
:
6432 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
6433 if (rec
.forkData
.theFork
.extents
[i
].blockCount
== 0) {
6436 if ((rec
.forkData
.theFork
.extents
[i
].startBlock
+
6437 rec
.forkData
.theFork
.extents
[i
].blockCount
) > allocLimit
) {
6438 need_relocate
= true;
6444 case kHFSPlusAttrExtents
:
6445 for (i
= 0; i
< kHFSPlusExtentDensity
; i
++) {
6446 if (rec
.overflowExtents
.extents
[i
].blockCount
== 0) {
6449 if ((rec
.overflowExtents
.extents
[i
].startBlock
+
6450 rec
.overflowExtents
.extents
[i
].blockCount
) > allocLimit
) {
6451 need_relocate
= true;
6458 /* Continue iterating to next attribute record */
6459 if (need_relocate
== false) {
6463 /* Look up the vnode for corresponding file. The cnode
6464 * will be locked which will ensure that no one modifies
6465 * the xattrs when we are relocating them.
6467 * We want to allow open-unlinked files to be moved,
6468 * so provide allow_deleted == 1 for hfs_vget().
6470 if (hfs_vget(hfsmp
, key
->fileID
, &vp
, 0, 1) != 0) {
6474 error
= hfs_reclaim_xattr(hfsmp
, vp
, key
->fileID
, allocLimit
, context
);
6475 hfs_unlock(VTOC(vp
));
6478 printf ("hfs_reclaim_xattrspace: Error relocating xattrs for fileid=%u (error=%d)\n", key
->fileID
, error
);
6481 prev_fileid
= key
->fileID
;
6486 printf("hfs_reclaim_xattrspace: Relocated %u xattr blocks from %u files on \"%s\"\n",
6487 (hfsmp
->hfs_resize_blocksmoved
- prev_blocksmoved
),
6488 files_moved
, hfsmp
->vcbVN
);
6491 kmem_free(kernel_map
, (vm_offset_t
)iterator
, sizeof(*iterator
));
6496 * Reclaim blocks from regular files.
6498 * This function iterates over all the record in catalog btree looking
6499 * for files with extents that overlap into the space we're trying to
6500 * free up. If a file extent requires relocation, it looks up the vnode
6501 * and calls function to relocate the data.
6504 * Zero on success, non-zero on failure.
6507 hfs_reclaim_filespace(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, vfs_context_t context
)
6511 struct BTreeIterator
*iterator
= NULL
;
6512 struct FSBufferDescriptor btdata
;
6513 int btree_operation
;
6515 struct HFSPlusCatalogFile filerec
;
6518 struct filefork
*datafork
;
6519 u_int32_t files_moved
= 0;
6520 u_int32_t prev_blocksmoved
;
6522 fcb
= VTOF(hfsmp
->hfs_catalog_vp
);
6523 /* Store the value to print total blocks moved by this function at the end */
6524 prev_blocksmoved
= hfsmp
->hfs_resize_blocksmoved
;
6526 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&iterator
, sizeof(*iterator
))) {
6529 bzero(iterator
, sizeof(*iterator
));
6531 btdata
.bufferAddress
= &filerec
;
6532 btdata
.itemSize
= sizeof(filerec
);
6533 btdata
.itemCount
= 1;
6535 btree_operation
= kBTreeFirstRecord
;
6537 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
6538 error
= BTIterateRecord(fcb
, btree_operation
, iterator
, &btdata
, NULL
);
6539 hfs_systemfile_unlock(hfsmp
, lockflags
);
6541 if (error
== fsBTRecordNotFoundErr
|| error
== fsBTEndOfIterationErr
) {
6546 btree_operation
= kBTreeNextRecord
;
6548 if (filerec
.recordType
!= kHFSPlusFileRecord
) {
6552 /* Check if any of the extents require relocation */
6553 if (hfs_file_extent_overlaps(hfsmp
, allocLimit
, &filerec
) == false) {
6557 /* We want to allow open-unlinked files to be moved, so allow_deleted == 1 */
6558 if (hfs_vget(hfsmp
, filerec
.fileID
, &vp
, 0, 1) != 0) {
6562 /* If data fork exists or item is a directory hard link, relocate blocks */
6563 datafork
= VTOF(vp
);
6564 if ((datafork
&& datafork
->ff_blocks
> 0) || vnode_isdir(vp
)) {
6565 error
= hfs_reclaim_file(hfsmp
, vp
, filerec
.fileID
,
6566 kHFSDataForkType
, allocLimit
, context
);
6568 printf ("hfs_reclaimspace: Error reclaiming datafork blocks of fileid=%u (error=%d)\n", filerec
.fileID
, error
);
6569 hfs_unlock(VTOC(vp
));
6575 /* If resource fork exists or item is a directory hard link, relocate blocks */
6576 if (((VTOC(vp
)->c_blocks
- (datafork
? datafork
->ff_blocks
: 0)) > 0) || vnode_isdir(vp
)) {
6577 if (vnode_isdir(vp
)) {
6578 /* Resource fork vnode lookup is invalid for directory hard link.
6579 * So we fake data fork vnode as resource fork vnode.
6583 error
= hfs_vgetrsrc(hfsmp
, vp
, &rvp
, TRUE
, FALSE
);
6585 printf ("hfs_reclaimspace: Error looking up rvp for fileid=%u (error=%d)\n", filerec
.fileID
, error
);
6586 hfs_unlock(VTOC(vp
));
6590 VTOC(rvp
)->c_flag
|= C_NEED_RVNODE_PUT
;
6593 error
= hfs_reclaim_file(hfsmp
, rvp
, filerec
.fileID
,
6594 kHFSResourceForkType
, allocLimit
, context
);
6596 printf ("hfs_reclaimspace: Error reclaiming rsrcfork blocks of fileid=%u (error=%d)\n", filerec
.fileID
, error
);
6597 hfs_unlock(VTOC(vp
));
6603 /* The file forks were relocated successfully, now drop the
6604 * cnode lock and vnode reference, and continue iterating to
6605 * next catalog record.
6607 hfs_unlock(VTOC(vp
));
6613 printf("hfs_reclaim_filespace: Relocated %u blocks from %u files on \"%s\"\n",
6614 (hfsmp
->hfs_resize_blocksmoved
- prev_blocksmoved
),
6615 files_moved
, hfsmp
->vcbVN
);
6618 kmem_free(kernel_map
, (vm_offset_t
)iterator
, sizeof(*iterator
));
6623 * Reclaim space at the end of a file system.
6626 * allocLimit - start block of the space being reclaimed
6627 * reclaimblks - number of allocation blocks to reclaim
6630 hfs_reclaimspace(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, u_int32_t reclaimblks
, vfs_context_t context
)
6635 * Preflight the bitmap to find out total number of blocks that need
6638 * Note: Since allocLimit is set to the location of new alternate volume
6639 * header, the check below does not account for blocks allocated for old
6640 * alternate volume header.
6642 error
= hfs_count_allocated(hfsmp
, allocLimit
, reclaimblks
, &(hfsmp
->hfs_resize_totalblocks
));
6644 printf ("hfs_reclaimspace: Unable to determine total blocks to reclaim error=%d\n", error
);
6647 if (hfs_resize_debug
) {
6648 printf ("hfs_reclaimspace: Total number of blocks to reclaim = %u\n", hfsmp
->hfs_resize_totalblocks
);
6651 /* Just to be safe, sync the content of the journal to the disk before we proceed */
6652 hfs_journal_flush(hfsmp
, TRUE
);
6654 /* First, relocate journal file blocks if they're in the way.
6655 * Doing this first will make sure that journal relocate code
6656 * gets access to contiguous blocks on disk first. The journal
6657 * file has to be contiguous on the disk, otherwise resize will
6660 error
= hfs_reclaim_journal_file(hfsmp
, allocLimit
, context
);
6662 printf("hfs_reclaimspace: hfs_reclaim_journal_file failed (%d)\n", error
);
6666 /* Relocate journal info block blocks if they're in the way. */
6667 error
= hfs_reclaim_journal_info_block(hfsmp
, allocLimit
, context
);
6669 printf("hfs_reclaimspace: hfs_reclaim_journal_info_block failed (%d)\n", error
);
6673 /* Relocate extents of the Extents B-tree if they're in the way.
6674 * Relocating extents btree before other btrees is important as
6675 * this will provide access to largest contiguous block range on
6676 * the disk for relocating extents btree. Note that extents btree
6677 * can only have maximum of 8 extents.
6679 error
= hfs_reclaim_file(hfsmp
, hfsmp
->hfs_extents_vp
, kHFSExtentsFileID
,
6680 kHFSDataForkType
, allocLimit
, context
);
6682 printf("hfs_reclaimspace: reclaim extents b-tree returned %d\n", error
);
6686 /* Relocate extents of the Allocation file if they're in the way. */
6687 error
= hfs_reclaim_file(hfsmp
, hfsmp
->hfs_allocation_vp
, kHFSAllocationFileID
,
6688 kHFSDataForkType
, allocLimit
, context
);
6690 printf("hfs_reclaimspace: reclaim allocation file returned %d\n", error
);
6694 /* Relocate extents of the Catalog B-tree if they're in the way. */
6695 error
= hfs_reclaim_file(hfsmp
, hfsmp
->hfs_catalog_vp
, kHFSCatalogFileID
,
6696 kHFSDataForkType
, allocLimit
, context
);
6698 printf("hfs_reclaimspace: reclaim catalog b-tree returned %d\n", error
);
6702 /* Relocate extents of the Attributes B-tree if they're in the way. */
6703 error
= hfs_reclaim_file(hfsmp
, hfsmp
->hfs_attribute_vp
, kHFSAttributesFileID
,
6704 kHFSDataForkType
, allocLimit
, context
);
6706 printf("hfs_reclaimspace: reclaim attribute b-tree returned %d\n", error
);
6710 /* Relocate extents of the Startup File if there is one and they're in the way. */
6711 error
= hfs_reclaim_file(hfsmp
, hfsmp
->hfs_startup_vp
, kHFSStartupFileID
,
6712 kHFSDataForkType
, allocLimit
, context
);
6714 printf("hfs_reclaimspace: reclaim startup file returned %d\n", error
);
6719 * We need to make sure the alternate volume header gets flushed if we moved
6720 * any extents in the volume header. But we need to do that before
6721 * shrinking the size of the volume, or else the journal code will panic
6722 * with an invalid (too large) block number.
6724 * Note that blks_moved will be set if ANY extent was moved, even
6725 * if it was just an overflow extent. In this case, the journal_flush isn't
6726 * strictly required, but shouldn't hurt.
6728 if (hfsmp
->hfs_resize_blocksmoved
) {
6729 hfs_journal_flush(hfsmp
, TRUE
);
6732 /* Reclaim extents from catalog file records */
6733 error
= hfs_reclaim_filespace(hfsmp
, allocLimit
, context
);
6735 printf ("hfs_reclaimspace: hfs_reclaim_filespace returned error=%d\n", error
);
6739 /* Reclaim extents from extent-based extended attributes, if any */
6740 error
= hfs_reclaim_xattrspace(hfsmp
, allocLimit
, context
);
6742 printf ("hfs_reclaimspace: hfs_reclaim_xattrspace returned error=%d\n", error
);
6751 * Check if there are any extents (including overflow extents) that overlap
6752 * into the disk space that is being reclaimed.
6755 * true - One of the extents need to be relocated
6756 * false - No overflow extents need to be relocated, or there was an error
6759 hfs_file_extent_overlaps(struct hfsmount
*hfsmp
, u_int32_t allocLimit
, struct HFSPlusCatalogFile
*filerec
)
6761 struct BTreeIterator
* iterator
= NULL
;
6762 struct FSBufferDescriptor btdata
;
6763 HFSPlusExtentRecord extrec
;
6764 HFSPlusExtentKey
*extkeyptr
;
6766 int overlapped
= false;
6772 /* Check if data fork overlaps the target space */
6773 for (i
= 0; i
< kHFSPlusExtentDensity
; ++i
) {
6774 if (filerec
->dataFork
.extents
[i
].blockCount
== 0) {
6777 endblock
= filerec
->dataFork
.extents
[i
].startBlock
+
6778 filerec
->dataFork
.extents
[i
].blockCount
;
6779 if (endblock
> allocLimit
) {
6785 /* Check if resource fork overlaps the target space */
6786 for (j
= 0; j
< kHFSPlusExtentDensity
; ++j
) {
6787 if (filerec
->resourceFork
.extents
[j
].blockCount
== 0) {
6790 endblock
= filerec
->resourceFork
.extents
[j
].startBlock
+
6791 filerec
->resourceFork
.extents
[j
].blockCount
;
6792 if (endblock
> allocLimit
) {
6798 /* Return back if there are no overflow extents for this file */
6799 if ((i
< kHFSPlusExtentDensity
) && (j
< kHFSPlusExtentDensity
)) {
6803 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&iterator
, sizeof(*iterator
))) {
6806 bzero(iterator
, sizeof(*iterator
));
6807 extkeyptr
= (HFSPlusExtentKey
*)&iterator
->key
;
6808 extkeyptr
->keyLength
= kHFSPlusExtentKeyMaximumLength
;
6809 extkeyptr
->forkType
= 0;
6810 extkeyptr
->fileID
= filerec
->fileID
;
6811 extkeyptr
->startBlock
= 0;
6813 btdata
.bufferAddress
= &extrec
;
6814 btdata
.itemSize
= sizeof(extrec
);
6815 btdata
.itemCount
= 1;
6817 fcb
= VTOF(hfsmp
->hfs_extents_vp
);
6819 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_EXTENTS
, HFS_SHARED_LOCK
);
6821 /* This will position the iterator just before the first overflow
6822 * extent record for given fileID. It will always return btNotFound,
6823 * so we special case the error code.
6825 error
= BTSearchRecord(fcb
, iterator
, &btdata
, NULL
, iterator
);
6826 if (error
&& (error
!= btNotFound
)) {
6830 /* BTIterateRecord() might return error if the btree is empty, and
6831 * therefore we return that the extent does not overflow to the caller
6833 error
= BTIterateRecord(fcb
, kBTreeNextRecord
, iterator
, &btdata
, NULL
);
6834 while (error
== 0) {
6835 /* Stop when we encounter a different file. */
6836 if (extkeyptr
->fileID
!= filerec
->fileID
) {
6839 /* Check if any of the forks exist in the target space. */
6840 for (i
= 0; i
< kHFSPlusExtentDensity
; ++i
) {
6841 if (extrec
[i
].blockCount
== 0) {
6844 endblock
= extrec
[i
].startBlock
+ extrec
[i
].blockCount
;
6845 if (endblock
> allocLimit
) {
6850 /* Look for more records. */
6851 error
= BTIterateRecord(fcb
, kBTreeNextRecord
, iterator
, &btdata
, NULL
);
6856 hfs_systemfile_unlock(hfsmp
, lockflags
);
6859 kmem_free(kernel_map
, (vm_offset_t
)iterator
, sizeof(*iterator
));
6866 * Calculate the progress of a file system resize operation.
6870 hfs_resize_progress(struct hfsmount
*hfsmp
, u_int32_t
*progress
)
6872 if ((hfsmp
->hfs_flags
& HFS_RESIZE_IN_PROGRESS
) == 0) {
6876 if (hfsmp
->hfs_resize_totalblocks
> 0) {
6877 *progress
= (u_int32_t
)((hfsmp
->hfs_resize_blocksmoved
* 100ULL) / hfsmp
->hfs_resize_totalblocks
);
6887 * Creates a UUID from a unique "name" in the HFS UUID Name space.
6888 * See version 3 UUID.
6891 hfs_getvoluuid(struct hfsmount
*hfsmp
, uuid_t result
)
6896 ((uint32_t *)rawUUID
)[0] = hfsmp
->vcbFndrInfo
[6];
6897 ((uint32_t *)rawUUID
)[1] = hfsmp
->vcbFndrInfo
[7];
6900 MD5Update( &md5c
, HFS_UUID_NAMESPACE_ID
, sizeof( uuid_t
) );
6901 MD5Update( &md5c
, rawUUID
, sizeof (rawUUID
) );
6902 MD5Final( result
, &md5c
);
6904 result
[6] = 0x30 | ( result
[6] & 0x0F );
6905 result
[8] = 0x80 | ( result
[8] & 0x3F );
6909 * Get file system attributes.
6912 hfs_vfs_getattr(struct mount
*mp
, struct vfs_attr
*fsap
, __unused vfs_context_t context
)
6914 #define HFS_ATTR_CMN_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST))
6915 #define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
6916 #define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST | ATTR_CMN_ACCTIME))
6918 ExtendedVCB
*vcb
= VFSTOVCB(mp
);
6919 struct hfsmount
*hfsmp
= VFSTOHFS(mp
);
6920 u_int32_t freeCNIDs
;
6922 freeCNIDs
= (u_int32_t
)0xFFFFFFFF - (u_int32_t
)hfsmp
->vcbNxtCNID
;
6924 VFSATTR_RETURN(fsap
, f_objcount
, (u_int64_t
)hfsmp
->vcbFilCnt
+ (u_int64_t
)hfsmp
->vcbDirCnt
);
6925 VFSATTR_RETURN(fsap
, f_filecount
, (u_int64_t
)hfsmp
->vcbFilCnt
);
6926 VFSATTR_RETURN(fsap
, f_dircount
, (u_int64_t
)hfsmp
->vcbDirCnt
);
6927 VFSATTR_RETURN(fsap
, f_maxobjcount
, (u_int64_t
)0xFFFFFFFF);
6928 VFSATTR_RETURN(fsap
, f_iosize
, (size_t)cluster_max_io_size(mp
, 0));
6929 VFSATTR_RETURN(fsap
, f_blocks
, (u_int64_t
)hfsmp
->totalBlocks
);
6930 VFSATTR_RETURN(fsap
, f_bfree
, (u_int64_t
)hfs_freeblks(hfsmp
, 0));
6931 VFSATTR_RETURN(fsap
, f_bavail
, (u_int64_t
)hfs_freeblks(hfsmp
, 1));
6932 VFSATTR_RETURN(fsap
, f_bsize
, (u_int32_t
)vcb
->blockSize
);
6933 /* XXX needs clarification */
6934 VFSATTR_RETURN(fsap
, f_bused
, hfsmp
->totalBlocks
- hfs_freeblks(hfsmp
, 1));
6935 /* Maximum files is constrained by total blocks. */
6936 VFSATTR_RETURN(fsap
, f_files
, (u_int64_t
)(hfsmp
->totalBlocks
- 2));
6937 VFSATTR_RETURN(fsap
, f_ffree
, MIN((u_int64_t
)freeCNIDs
, (u_int64_t
)hfs_freeblks(hfsmp
, 1)));
6939 fsap
->f_fsid
.val
[0] = hfsmp
->hfs_raw_dev
;
6940 fsap
->f_fsid
.val
[1] = vfs_typenum(mp
);
6941 VFSATTR_SET_SUPPORTED(fsap
, f_fsid
);
6943 VFSATTR_RETURN(fsap
, f_signature
, vcb
->vcbSigWord
);
6944 VFSATTR_RETURN(fsap
, f_carbon_fsid
, 0);
6946 if (VFSATTR_IS_ACTIVE(fsap
, f_capabilities
)) {
6947 vol_capabilities_attr_t
*cap
;
6949 cap
= &fsap
->f_capabilities
;
6951 if (hfsmp
->hfs_flags
& HFS_STANDARD
) {
6952 cap
->capabilities
[VOL_CAPABILITIES_FORMAT
] =
6953 VOL_CAP_FMT_PERSISTENTOBJECTIDS
|
6954 VOL_CAP_FMT_CASE_PRESERVING
|
6955 VOL_CAP_FMT_FAST_STATFS
|
6956 VOL_CAP_FMT_HIDDEN_FILES
|
6957 VOL_CAP_FMT_PATH_FROM_ID
;
6959 cap
->capabilities
[VOL_CAPABILITIES_FORMAT
] =
6960 VOL_CAP_FMT_PERSISTENTOBJECTIDS
|
6961 VOL_CAP_FMT_SYMBOLICLINKS
|
6962 VOL_CAP_FMT_HARDLINKS
|
6963 VOL_CAP_FMT_JOURNAL
|
6964 VOL_CAP_FMT_ZERO_RUNS
|
6965 (hfsmp
->jnl
? VOL_CAP_FMT_JOURNAL_ACTIVE
: 0) |
6966 (hfsmp
->hfs_flags
& HFS_CASE_SENSITIVE
? VOL_CAP_FMT_CASE_SENSITIVE
: 0) |
6967 VOL_CAP_FMT_CASE_PRESERVING
|
6968 VOL_CAP_FMT_FAST_STATFS
|
6969 VOL_CAP_FMT_2TB_FILESIZE
|
6970 VOL_CAP_FMT_HIDDEN_FILES
|
6972 VOL_CAP_FMT_PATH_FROM_ID
|
6973 VOL_CAP_FMT_DECMPFS_COMPRESSION
;
6975 VOL_CAP_FMT_PATH_FROM_ID
;
6978 cap
->capabilities
[VOL_CAPABILITIES_INTERFACES
] =
6979 VOL_CAP_INT_SEARCHFS
|
6980 VOL_CAP_INT_ATTRLIST
|
6981 VOL_CAP_INT_NFSEXPORT
|
6982 VOL_CAP_INT_READDIRATTR
|
6983 VOL_CAP_INT_EXCHANGEDATA
|
6984 VOL_CAP_INT_ALLOCATE
|
6985 VOL_CAP_INT_VOL_RENAME
|
6986 VOL_CAP_INT_ADVLOCK
|
6989 VOL_CAP_INT_EXTENDED_ATTR
|
6990 VOL_CAP_INT_NAMEDSTREAMS
;
6992 VOL_CAP_INT_EXTENDED_ATTR
;
6994 cap
->capabilities
[VOL_CAPABILITIES_RESERVED1
] = 0;
6995 cap
->capabilities
[VOL_CAPABILITIES_RESERVED2
] = 0;
6997 cap
->valid
[VOL_CAPABILITIES_FORMAT
] =
6998 VOL_CAP_FMT_PERSISTENTOBJECTIDS
|
6999 VOL_CAP_FMT_SYMBOLICLINKS
|
7000 VOL_CAP_FMT_HARDLINKS
|
7001 VOL_CAP_FMT_JOURNAL
|
7002 VOL_CAP_FMT_JOURNAL_ACTIVE
|
7003 VOL_CAP_FMT_NO_ROOT_TIMES
|
7004 VOL_CAP_FMT_SPARSE_FILES
|
7005 VOL_CAP_FMT_ZERO_RUNS
|
7006 VOL_CAP_FMT_CASE_SENSITIVE
|
7007 VOL_CAP_FMT_CASE_PRESERVING
|
7008 VOL_CAP_FMT_FAST_STATFS
|
7009 VOL_CAP_FMT_2TB_FILESIZE
|
7010 VOL_CAP_FMT_OPENDENYMODES
|
7011 VOL_CAP_FMT_HIDDEN_FILES
|
7013 VOL_CAP_FMT_PATH_FROM_ID
|
7014 VOL_CAP_FMT_DECMPFS_COMPRESSION
;
7016 VOL_CAP_FMT_PATH_FROM_ID
;
7018 cap
->valid
[VOL_CAPABILITIES_INTERFACES
] =
7019 VOL_CAP_INT_SEARCHFS
|
7020 VOL_CAP_INT_ATTRLIST
|
7021 VOL_CAP_INT_NFSEXPORT
|
7022 VOL_CAP_INT_READDIRATTR
|
7023 VOL_CAP_INT_EXCHANGEDATA
|
7024 VOL_CAP_INT_COPYFILE
|
7025 VOL_CAP_INT_ALLOCATE
|
7026 VOL_CAP_INT_VOL_RENAME
|
7027 VOL_CAP_INT_ADVLOCK
|
7029 VOL_CAP_INT_MANLOCK
|
7031 VOL_CAP_INT_EXTENDED_ATTR
|
7032 VOL_CAP_INT_NAMEDSTREAMS
;
7034 VOL_CAP_INT_EXTENDED_ATTR
;
7036 cap
->valid
[VOL_CAPABILITIES_RESERVED1
] = 0;
7037 cap
->valid
[VOL_CAPABILITIES_RESERVED2
] = 0;
7038 VFSATTR_SET_SUPPORTED(fsap
, f_capabilities
);
7040 if (VFSATTR_IS_ACTIVE(fsap
, f_attributes
)) {
7041 vol_attributes_attr_t
*attrp
= &fsap
->f_attributes
;
7043 attrp
->validattr
.commonattr
= HFS_ATTR_CMN_VOL_VALIDMASK
;
7044 attrp
->validattr
.volattr
= ATTR_VOL_VALIDMASK
& ~ATTR_VOL_INFO
;
7045 attrp
->validattr
.dirattr
= ATTR_DIR_VALIDMASK
;
7046 attrp
->validattr
.fileattr
= HFS_ATTR_FILE_VALIDMASK
;
7047 attrp
->validattr
.forkattr
= 0;
7049 attrp
->nativeattr
.commonattr
= HFS_ATTR_CMN_VOL_VALIDMASK
;
7050 attrp
->nativeattr
.volattr
= ATTR_VOL_VALIDMASK
& ~ATTR_VOL_INFO
;
7051 attrp
->nativeattr
.dirattr
= ATTR_DIR_VALIDMASK
;
7052 attrp
->nativeattr
.fileattr
= HFS_ATTR_FILE_VALIDMASK
;
7053 attrp
->nativeattr
.forkattr
= 0;
7054 VFSATTR_SET_SUPPORTED(fsap
, f_attributes
);
7056 fsap
->f_create_time
.tv_sec
= hfsmp
->hfs_itime
;
7057 fsap
->f_create_time
.tv_nsec
= 0;
7058 VFSATTR_SET_SUPPORTED(fsap
, f_create_time
);
7059 fsap
->f_modify_time
.tv_sec
= hfsmp
->vcbLsMod
;
7060 fsap
->f_modify_time
.tv_nsec
= 0;
7061 VFSATTR_SET_SUPPORTED(fsap
, f_modify_time
);
7063 fsap
->f_backup_time
.tv_sec
= hfsmp
->vcbVolBkUp
;
7064 fsap
->f_backup_time
.tv_nsec
= 0;
7065 VFSATTR_SET_SUPPORTED(fsap
, f_backup_time
);
7066 if (VFSATTR_IS_ACTIVE(fsap
, f_fssubtype
)) {
7067 u_int16_t subtype
= 0;
7070 * Subtypes (flavors) for HFS
7071 * 0: Mac OS Extended
7072 * 1: Mac OS Extended (Journaled)
7073 * 2: Mac OS Extended (Case Sensitive)
7074 * 3: Mac OS Extended (Case Sensitive, Journaled)
7076 * 128: Mac OS Standard
7079 if (hfsmp
->hfs_flags
& HFS_STANDARD
) {
7080 subtype
= HFS_SUBTYPE_STANDARDHFS
;
7081 } else /* HFS Plus */ {
7083 subtype
|= HFS_SUBTYPE_JOURNALED
;
7084 if (hfsmp
->hfs_flags
& HFS_CASE_SENSITIVE
)
7085 subtype
|= HFS_SUBTYPE_CASESENSITIVE
;
7087 fsap
->f_fssubtype
= subtype
;
7088 VFSATTR_SET_SUPPORTED(fsap
, f_fssubtype
);
7091 if (VFSATTR_IS_ACTIVE(fsap
, f_vol_name
)) {
7092 strlcpy(fsap
->f_vol_name
, (char *) hfsmp
->vcbVN
, MAXPATHLEN
);
7093 VFSATTR_SET_SUPPORTED(fsap
, f_vol_name
);
7095 if (VFSATTR_IS_ACTIVE(fsap
, f_uuid
)) {
7096 hfs_getvoluuid(hfsmp
, fsap
->f_uuid
);
7097 VFSATTR_SET_SUPPORTED(fsap
, f_uuid
);
7103 * Perform a volume rename. Requires the FS' root vp.
7106 hfs_rename_volume(struct vnode
*vp
, const char *name
, proc_t p
)
7108 ExtendedVCB
*vcb
= VTOVCB(vp
);
7109 struct cnode
*cp
= VTOC(vp
);
7110 struct hfsmount
*hfsmp
= VTOHFS(vp
);
7111 struct cat_desc to_desc
;
7112 struct cat_desc todir_desc
;
7113 struct cat_desc new_desc
;
7114 cat_cookie_t cookie
;
7117 char converted_volname
[256];
7118 size_t volname_length
= 0;
7119 size_t conv_volname_length
= 0;
7123 * Ignore attempts to rename a volume to a zero-length name.
7128 bzero(&to_desc
, sizeof(to_desc
));
7129 bzero(&todir_desc
, sizeof(todir_desc
));
7130 bzero(&new_desc
, sizeof(new_desc
));
7131 bzero(&cookie
, sizeof(cookie
));
7133 todir_desc
.cd_parentcnid
= kHFSRootParentID
;
7134 todir_desc
.cd_cnid
= kHFSRootFolderID
;
7135 todir_desc
.cd_flags
= CD_ISDIR
;
7137 to_desc
.cd_nameptr
= (const u_int8_t
*)name
;
7138 to_desc
.cd_namelen
= strlen(name
);
7139 to_desc
.cd_parentcnid
= kHFSRootParentID
;
7140 to_desc
.cd_cnid
= cp
->c_cnid
;
7141 to_desc
.cd_flags
= CD_ISDIR
;
7143 if ((error
= hfs_lock(cp
, HFS_EXCLUSIVE_LOCK
)) == 0) {
7144 if ((error
= hfs_start_transaction(hfsmp
)) == 0) {
7145 if ((error
= cat_preflight(hfsmp
, CAT_RENAME
, &cookie
, p
)) == 0) {
7146 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
7148 error
= cat_rename(hfsmp
, &cp
->c_desc
, &todir_desc
, &to_desc
, &new_desc
);
7151 * If successful, update the name in the VCB, ensure it's terminated.
7154 strlcpy((char *)vcb
->vcbVN
, name
, sizeof(vcb
->vcbVN
));
7155 volname_length
= strlen ((const char*)vcb
->vcbVN
);
7156 #define DKIOCCSSETLVNAME _IOW('d', 198, char[1024])
7157 /* Send the volume name down to CoreStorage if necessary */
7158 error
= utf8_normalizestr(vcb
->vcbVN
, volname_length
, (u_int8_t
*)converted_volname
, &conv_volname_length
, 256, UTF_PRECOMPOSED
);
7160 (void) VNOP_IOCTL (hfsmp
->hfs_devvp
, DKIOCCSSETLVNAME
, converted_volname
, 0, vfs_context_current());
7165 hfs_systemfile_unlock(hfsmp
, lockflags
);
7166 cat_postflight(hfsmp
, &cookie
, p
);
7170 (void) hfs_flushvolumeheader(hfsmp
, MNT_WAIT
, 0);
7172 hfs_end_transaction(hfsmp
);
7175 /* Release old allocated name buffer */
7176 if (cp
->c_desc
.cd_flags
& CD_HASBUF
) {
7177 const char *tmp_name
= (const char *)cp
->c_desc
.cd_nameptr
;
7179 cp
->c_desc
.cd_nameptr
= 0;
7180 cp
->c_desc
.cd_namelen
= 0;
7181 cp
->c_desc
.cd_flags
&= ~CD_HASBUF
;
7182 vfs_removename(tmp_name
);
7184 /* Update cnode's catalog descriptor */
7185 replace_desc(cp
, &new_desc
);
7186 vcb
->volumeNameEncodingHint
= new_desc
.cd_encoding
;
7187 cp
->c_touch_chgtime
= TRUE
;
7197 * Get file system attributes.
7200 hfs_vfs_setattr(struct mount
*mp
, struct vfs_attr
*fsap
, __unused vfs_context_t context
)
7202 kauth_cred_t cred
= vfs_context_ucred(context
);
7206 * Must be superuser or owner of filesystem to change volume attributes
7208 if (!kauth_cred_issuser(cred
) && (kauth_cred_getuid(cred
) != vfs_statfs(mp
)->f_owner
))
7211 if (VFSATTR_IS_ACTIVE(fsap
, f_vol_name
)) {
7214 error
= hfs_vfs_root(mp
, &root_vp
, context
);
7218 error
= hfs_rename_volume(root_vp
, fsap
->f_vol_name
, vfs_context_proc(context
));
7219 (void) vnode_put(root_vp
);
7223 VFSATTR_SET_SUPPORTED(fsap
, f_vol_name
);
7230 /* If a runtime corruption is detected, set the volume inconsistent
7231 * bit in the volume attributes. The volume inconsistent bit is a persistent
7232 * bit which represents that the volume is corrupt and needs repair.
7233 * The volume inconsistent bit can be set from the kernel when it detects
7234 * runtime corruption or from file system repair utilities like fsck_hfs when
7235 * a repair operation fails. The bit should be cleared only from file system
7236 * verify/repair utility like fsck_hfs when a verify/repair succeeds.
7238 void hfs_mark_volume_inconsistent(struct hfsmount
*hfsmp
)
7240 HFS_MOUNT_LOCK(hfsmp
, TRUE
);
7241 if ((hfsmp
->vcbAtrb
& kHFSVolumeInconsistentMask
) == 0) {
7242 hfsmp
->vcbAtrb
|= kHFSVolumeInconsistentMask
;
7243 MarkVCBDirty(hfsmp
);
7245 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
)==0) {
7246 /* Log information to ASL log */
7247 fslog_fs_corrupt(hfsmp
->hfs_mp
);
7248 printf("hfs: Runtime corruption detected on %s, fsck will be forced on next mount.\n", hfsmp
->vcbVN
);
7250 HFS_MOUNT_UNLOCK(hfsmp
, TRUE
);
7253 /* Replay the journal on the device node provided. Returns zero if
7254 * journal replay succeeded or no journal was supposed to be replayed.
7256 static int hfs_journal_replay(vnode_t devvp
, vfs_context_t context
)
7259 struct mount
*mp
= NULL
;
7260 struct hfs_mount_args
*args
= NULL
;
7262 /* Replay allowed only on raw devices */
7263 if (!vnode_ischr(devvp
) && !vnode_isblk(devvp
)) {
7268 /* Create dummy mount structures */
7269 MALLOC(mp
, struct mount
*, sizeof(struct mount
), M_TEMP
, M_WAITOK
);
7274 bzero(mp
, sizeof(struct mount
));
7275 mount_lock_init(mp
);
7277 MALLOC(args
, struct hfs_mount_args
*, sizeof(struct hfs_mount_args
), M_TEMP
, M_WAITOK
);
7282 bzero(args
, sizeof(struct hfs_mount_args
));
7284 retval
= hfs_mountfs(devvp
, mp
, args
, 1, context
);
7285 buf_flushdirtyblks(devvp
, TRUE
, 0, "hfs_journal_replay");
7287 /* FSYNC the devnode to be sure all data has been flushed */
7288 retval
= VNOP_FSYNC(devvp
, MNT_WAIT
, context
);
7292 mount_lock_destroy(mp
);
7302 * hfs vfs operations.
7304 struct vfsops hfs_vfsops
= {
7310 hfs_vfs_getattr
, /* was hfs_statfs */