[apple/xnu.git] bsd/hfs/hfs_vfsops.c (xnu-1228.12.14)
1 /*
2 * Copyright (c) 1999-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1991, 1993, 1994
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * hfs_vfsops.c
66 * derived from @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
67 *
68 * (c) Copyright 1997-2002 Apple Computer, Inc. All rights reserved.
69 *
70 * hfs_vfsops.c -- VFS layer for loadable HFS file system.
71 *
72 */
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kauth.h>
76
77 #include <sys/ubc.h>
78 #include <sys/ubc_internal.h>
79 #include <sys/vnode_internal.h>
80 #include <sys/mount_internal.h>
81 #include <sys/sysctl.h>
82 #include <sys/malloc.h>
83 #include <sys/stat.h>
84 #include <sys/quota.h>
85 #include <sys/disk.h>
86 #include <sys/paths.h>
87 #include <sys/utfconv.h>
88 #include <sys/kdebug.h>
89 #include <sys/fslog.h>
90
91 #include <kern/locks.h>
92
93 #include <vfs/vfs_journal.h>
94
95 #include <miscfs/specfs/specdev.h>
96 #include <hfs/hfs_mount.h>
97
98 #include "hfs.h"
99 #include "hfs_catalog.h"
100 #include "hfs_cnode.h"
101 #include "hfs_dbg.h"
102 #include "hfs_endian.h"
103 #include "hfs_hotfiles.h"
104 #include "hfs_quota.h"
105
106 #include "hfscommon/headers/FileMgrInternal.h"
107 #include "hfscommon/headers/BTreesInternal.h"
108
109 #if HFS_DIAGNOSTIC
110 int hfs_dbg_all = 0;
111 int hfs_dbg_err = 0;
112 #endif
113
114
115 lck_grp_attr_t * hfs_group_attr;
116 lck_attr_t * hfs_lock_attr;
117 lck_grp_t * hfs_mutex_group;
118 lck_grp_t * hfs_rwlock_group;
119
120 extern struct vnodeopv_desc hfs_vnodeop_opv_desc;
121 /* not static so we can re-use in hfs_readwrite.c for build_path */
122 int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);
123
124
125 static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
126 static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
127 static int hfs_flushfiles(struct mount *, int, struct proc *);
128 static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);
129 static int hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp);
130 static int hfs_init(struct vfsconf *vfsp);
131 static int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context);
132 static int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context);
133 static int hfs_reload(struct mount *mp);
134 static int hfs_vfs_root(struct mount *mp, struct vnode **vpp, vfs_context_t context);
135 static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
136 static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
137 static int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
138 static int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
139 static int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
140 user_addr_t newp, size_t newlen, vfs_context_t context);
141 static int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);
142 static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
143
144 static int hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vfs_context_t context);
145 static int hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk,
146 u_int32_t catblks, u_int32_t fileID, int rsrcfork);
147 static int hfs_journal_replay(const char *devnode, vfs_context_t context);
148
149
150 /*
151 * Called by vfs_mountroot when mounting HFS Plus as root.
152 */
153
154 __private_extern__
155 int
156 hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context)
157 {
158 struct hfsmount *hfsmp;
159 ExtendedVCB *vcb;
160 struct vfsstatfs *vfsp;
161 int error;
162
163 hfs_chashinit_finish();
164
165 if ((error = hfs_mountfs(rvp, mp, NULL, 0, context)))
166 return (error);
167
168 /* Init hfsmp */
169 hfsmp = VFSTOHFS(mp);
170
171 hfsmp->hfs_uid = UNKNOWNUID;
172 hfsmp->hfs_gid = UNKNOWNGID;
173 hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
174 hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
175
176 /* Establish the free block reserve. */
177 vcb = HFSTOVCB(hfsmp);
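/* Reserve HFS_MINFREE percent of the volume, but never more than HFS_MAXRESERVE bytes worth of allocation blocks. */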
178 vcb->reserveBlocks = ((u_int64_t)vcb->totalBlocks * HFS_MINFREE) / 100;
179 vcb->reserveBlocks = MIN(vcb->reserveBlocks, HFS_MAXRESERVE / vcb->blockSize);
180
181 vfsp = vfs_statfs(mp);
182 (void)hfs_statfs(mp, vfsp, NULL);
183
184 return (0);
185 }
186
187
188 /*
189 * VFS Operations.
190 *
191 * mount system call
192 */
193
194 static int
195 hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
196 {
197 struct proc *p = vfs_context_proc(context);
198 struct hfsmount *hfsmp = NULL;
199 struct hfs_mount_args args;
200 int retval = E_NONE;
201 u_int32_t cmdflags;
202
203 if ((retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
204 return (retval);
205 }
206 cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
207 if (cmdflags & MNT_UPDATE) {
208 hfsmp = VFSTOHFS(mp);
209
210 /* Reload incore data after an fsck. */
211 if (cmdflags & MNT_RELOAD) {
212 if (vfs_isrdonly(mp))
213 return hfs_reload(mp);
214 else
215 return (EINVAL);
216 }
217
218 /* Change to a read-only file system. */
219 if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
220 vfs_isrdonly(mp)) {
221 int flags;
222
223 /* Set flag to indicate that a downgrade to read-only
224 * is in progress and therefore block any further
225 * modifications to the file system.
226 */
227 hfs_global_exclusive_lock_acquire(hfsmp);
228 hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
229 hfsmp->hfs_downgrading_proc = current_thread();
230 hfs_global_exclusive_lock_release(hfsmp);
231
232 /* use VFS_SYNC to push out System (btree) files */
233 retval = VFS_SYNC(mp, MNT_WAIT, context);
234 if (retval && ((cmdflags & MNT_FORCE) == 0)) {
235 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
236 hfsmp->hfs_downgrading_proc = NULL;
237 goto out;
238 }
239
240 flags = WRITECLOSE;
241 if (cmdflags & MNT_FORCE)
242 flags |= FORCECLOSE;
243
244 if ((retval = hfs_flushfiles(mp, flags, p))) {
245 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
246 hfsmp->hfs_downgrading_proc = NULL;
247 goto out;
248 }
249
250 /* mark the volume cleanly unmounted */
251 hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
252 retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
253 hfsmp->hfs_flags |= HFS_READ_ONLY;
254
255 /* also get the volume bitmap blocks */
256 if (!retval) {
257 if (vnode_mount(hfsmp->hfs_devvp) == mp) {
258 retval = hfs_fsync(hfsmp->hfs_devvp, MNT_WAIT, 0, p);
259 } else {
260 vnode_get(hfsmp->hfs_devvp);
261 retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
262 vnode_put(hfsmp->hfs_devvp);
263 }
264 }
265 if (retval) {
266 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
267 hfsmp->hfs_downgrading_proc = NULL;
268 hfsmp->hfs_flags &= ~HFS_READ_ONLY;
269 goto out;
270 }
271 if (hfsmp->jnl) {
272 hfs_global_exclusive_lock_acquire(hfsmp);
273
274 journal_close(hfsmp->jnl);
275 hfsmp->jnl = NULL;
276
277                                 // Note: we explicitly don't want to shut down
278 // access to the jvp because we may need
279 // it later if we go back to being read-write.
280
281 hfs_global_exclusive_lock_release(hfsmp);
282 }
283
284 hfsmp->hfs_downgrading_proc = NULL;
285 }
286
287 /* Change to a writable file system. */
288 if (vfs_iswriteupgrade(mp)) {
289
290 /*
291 * On inconsistent disks, do not allow read-write mount
292 * unless it is the boot volume being mounted.
293 */
294 if (!(vfs_flags(mp) & MNT_ROOTFS) &&
295 (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
296 retval = EINVAL;
297 goto out;
298 }
299
300                         // If the journal was shut down previously because we were
301 // asked to be read-only, let's start it back up again now
302
303 if ( (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
304 && hfsmp->jnl == NULL
305 && hfsmp->jvp != NULL) {
306 int jflags;
307
308 if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
309 jflags = JOURNAL_RESET;
310 } else {
311 jflags = 0;
312 }
313
314 hfs_global_exclusive_lock_acquire(hfsmp);
315
316 hfsmp->jnl = journal_open(hfsmp->jvp,
317 (hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
318 hfsmp->jnl_size,
319 hfsmp->hfs_devvp,
320 hfsmp->hfs_logical_block_size,
321 jflags,
322 0,
323 hfs_sync_metadata, hfsmp->hfs_mp);
324
325 hfs_global_exclusive_lock_release(hfsmp);
326
327 if (hfsmp->jnl == NULL) {
328 retval = EINVAL;
329 goto out;
330 } else {
331 hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
332 }
333
334 }
335
336                         /* Only clear HFS_READ_ONLY after a successful write */
337 hfsmp->hfs_flags &= ~HFS_READ_ONLY;
338
339 /* If this mount point was downgraded from read-write
340 * to read-only, clear that information as we are now
341 * moving back to read-write.
342 */
343 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
344 hfsmp->hfs_downgrading_proc = NULL;
345
346 /* mark the volume dirty (clear clean unmount bit) */
347 hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;
348
349 retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
350 if (retval != E_NONE)
351 goto out;
352
353 if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
354 /* Setup private/hidden directories for hardlinks. */
355 hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
356 hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
357
358 hfs_remove_orphans(hfsmp);
359
360 /*
361 * Allow hot file clustering if conditions allow.
362 */
363 if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
364 (void) hfs_recording_init(hfsmp);
365 }
366 /* Force ACLs on HFS+ file systems. */
367 if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
368 vfs_setextendedsecurity(HFSTOVFS(hfsmp));
369 }
370 }
371 }
372
373 /* Update file system parameters. */
374 retval = hfs_changefs(mp, &args);
375
376 } else /* not an update request */ {
377
378 /* Set the mount flag to indicate that we support volfs */
379 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));
380
381 hfs_chashinit_finish();
382
383 retval = hfs_mountfs(devvp, mp, &args, 0, context);
384 }
385 out:
386 if (retval == 0) {
387 (void)hfs_statfs(mp, vfs_statfs(mp), context);
388 }
389 return (retval);
390 }
391
392
393 struct hfs_changefs_cargs {
394 struct hfsmount *hfsmp;
395 int namefix;
396 int permfix;
397 int permswitch;
398 };
399
400 static int
401 hfs_changefs_callback(struct vnode *vp, void *cargs)
402 {
403 ExtendedVCB *vcb;
404 struct cnode *cp;
405 struct cat_desc cndesc;
406 struct cat_attr cnattr;
407 struct hfs_changefs_cargs *args;
408 int lockflags;
409 int error;
410
411 args = (struct hfs_changefs_cargs *)cargs;
412
413 cp = VTOC(vp);
414 vcb = HFSTOVCB(args->hfsmp);
415
416 lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
417 error = cat_lookup(args->hfsmp, &cp->c_desc, 0, &cndesc, &cnattr, NULL, NULL);
418 hfs_systemfile_unlock(args->hfsmp, lockflags);
419 if (error) {
420 /*
421 * If we couldn't find this guy skip to the next one
422 */
423 if (args->namefix)
424 cache_purge(vp);
425
426 return (VNODE_RETURNED);
427 }
428 /*
429 * Get the real uid/gid and perm mask from disk.
430 */
431 if (args->permswitch || args->permfix) {
432 cp->c_uid = cnattr.ca_uid;
433 cp->c_gid = cnattr.ca_gid;
434 cp->c_mode = cnattr.ca_mode;
435 }
436 /*
437 * If we're switching name converters then...
438 * Remove the existing entry from the namei cache.
439 * Update name to one based on new encoder.
440 */
441 if (args->namefix) {
442 cache_purge(vp);
443 replace_desc(cp, &cndesc);
444
445 if (cndesc.cd_cnid == kHFSRootFolderID) {
446 strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
447 cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;
448 }
449 } else {
450 cat_releasedesc(&cndesc);
451 }
452 return (VNODE_RETURNED);
453 }
454
455 /* Change fs mount parameters */
456 static int
457 hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
458 {
459 int retval = 0;
460 int namefix, permfix, permswitch;
461 struct hfsmount *hfsmp;
462 ExtendedVCB *vcb;
463 hfs_to_unicode_func_t get_unicode_func;
464 unicode_to_hfs_func_t get_hfsname_func;
465 u_long old_encoding = 0;
466 struct hfs_changefs_cargs cargs;
467 u_int32_t mount_flags;
468
469 hfsmp = VFSTOHFS(mp);
470 vcb = HFSTOVCB(hfsmp);
471 mount_flags = (unsigned int)vfs_flags(mp);
472
473 hfsmp->hfs_flags |= HFS_IN_CHANGEFS;
474
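        /* permswitch is set when the volume is moving between honoring on-disk permissions and MNT_UNKNOWNPERMISSIONS, in either direction. */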
475 permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
476 ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
477 (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
478 (mount_flags & MNT_UNKNOWNPERMISSIONS)));
479
480 /* The root filesystem must operate with actual permissions: */
481 if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
482 vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS)); /* Just say "No". */
483 retval = EINVAL;
484 goto exit;
485 }
486 if (mount_flags & MNT_UNKNOWNPERMISSIONS)
487 hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
488 else
489 hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;
490
491 namefix = permfix = 0;
492
493 /*
494 * Tracking of hot files requires up-to-date access times. So if
495 * access time updates are disabled, we must also disable hot files.
496 */
497 if (mount_flags & MNT_NOATIME) {
498 (void) hfs_recording_suspend(hfsmp);
499 }
500
501 /* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
502 if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
503 gTimeZone = args->hfs_timezone;
504 }
505
506 /* Change the default uid, gid and/or mask */
507 if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
508 hfsmp->hfs_uid = args->hfs_uid;
509 if (vcb->vcbSigWord == kHFSPlusSigWord)
510 ++permfix;
511 }
512 if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
513 hfsmp->hfs_gid = args->hfs_gid;
514 if (vcb->vcbSigWord == kHFSPlusSigWord)
515 ++permfix;
516 }
517 if (args->hfs_mask != (mode_t)VNOVAL) {
518 if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
519 hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
520 hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
521 if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
522 hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
523 if (vcb->vcbSigWord == kHFSPlusSigWord)
524 ++permfix;
525 }
526 }
527
528 /* Change the hfs encoding value (hfs only) */
529 if ((vcb->vcbSigWord == kHFSSigWord) &&
530 (args->hfs_encoding != (u_long)VNOVAL) &&
531 (hfsmp->hfs_encoding != args->hfs_encoding)) {
532
533 retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);
534 if (retval)
535 goto exit;
536
537 /*
538 * Connect the new hfs_get_unicode converter but leave
539 * the old hfs_get_hfsname converter in place so that
540 * we can lookup existing vnodes to get their correctly
541 * encoded names.
542 *
543 * When we're all finished, we can then connect the new
544 * hfs_get_hfsname converter and release our interest
545 * in the old converters.
546 */
547 hfsmp->hfs_get_unicode = get_unicode_func;
548 old_encoding = hfsmp->hfs_encoding;
549 hfsmp->hfs_encoding = args->hfs_encoding;
550 ++namefix;
551 }
552
553 if (!(namefix || permfix || permswitch))
554 goto exit;
555
556 /* XXX 3762912 hack to support HFS filesystem 'owner' */
557 if (permfix)
558 vfs_setowner(mp,
559 hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
560 hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);
561
562 /*
563 * For each active vnode fix things that changed
564 *
565 * Note that we can visit a vnode more than once
566 * and we can race with fsync.
567 *
568 * hfs_changefs_callback will be called for each vnode
569 * hung off of this mount point
570 *
571 * The vnode will be properly referenced and unreferenced
572 * around the callback
573 */
574 cargs.hfsmp = hfsmp;
575 cargs.namefix = namefix;
576 cargs.permfix = permfix;
577 cargs.permswitch = permswitch;
578
579 vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);
580
581 /*
582 * If we're switching name converters we can now
583 * connect the new hfs_get_hfsname converter and
584 * release our interest in the old converters.
585 */
586 if (namefix) {
587 hfsmp->hfs_get_hfsname = get_hfsname_func;
588 vcb->volumeNameEncodingHint = args->hfs_encoding;
589 (void) hfs_relconverter(old_encoding);
590 }
591 exit:
592 hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
593 return (retval);
594 }
595
596
597 struct hfs_reload_cargs {
598 struct hfsmount *hfsmp;
599 int error;
600 };
601
602 static int
603 hfs_reload_callback(struct vnode *vp, void *cargs)
604 {
605 struct cnode *cp;
606 struct hfs_reload_cargs *args;
607 int lockflags;
608
609 args = (struct hfs_reload_cargs *)cargs;
610 /*
611 * flush all the buffers associated with this node
612 */
613 (void) buf_invalidateblks(vp, 0, 0, 0);
614
615 cp = VTOC(vp);
616 /*
617 * Remove any directory hints
618 */
619 if (vnode_isdir(vp))
620 hfs_reldirhints(cp, 0);
621
622 /*
623 * Re-read cnode data for all active vnodes (non-metadata files).
624 */
625 if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp)) {
626 struct cat_fork *datafork;
627 struct cat_desc desc;
628
629 datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;
630
631 /* lookup by fileID since name could have changed */
632 lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
633 args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, &desc, &cp->c_attr, datafork);
634 hfs_systemfile_unlock(args->hfsmp, lockflags);
635 if (args->error) {
636 return (VNODE_RETURNED_DONE);
637 }
638
639 /* update cnode's catalog descriptor */
640 (void) replace_desc(cp, &desc);
641 }
642 return (VNODE_RETURNED);
643 }
644
645 /*
646 * Reload all incore data for a filesystem (used after running fsck on
647 * the root filesystem and finding things to fix). The filesystem must
648 * be mounted read-only.
649 *
650 * Things to do to update the mount:
651 * invalidate all cached meta-data.
652 * invalidate all inactive vnodes.
653 * invalidate all cached file data.
654 * re-read volume header from disk.
655 * re-load meta-file info (extents, file size).
656 * re-load B-tree header data.
657 * re-read cnode data for all active vnodes.
658 */
659 static int
660 hfs_reload(struct mount *mountp)
661 {
662 register struct vnode *devvp;
663 struct buf *bp;
664 int error, i;
665 struct hfsmount *hfsmp;
666 struct HFSPlusVolumeHeader *vhp;
667 ExtendedVCB *vcb;
668 struct filefork *forkp;
669 struct cat_desc cndesc;
670 struct hfs_reload_cargs args;
671 daddr64_t priIDSector;
672
673 hfsmp = VFSTOHFS(mountp);
674 vcb = HFSTOVCB(hfsmp);
675
676 if (vcb->vcbSigWord == kHFSSigWord)
677 return (EINVAL); /* rooting from HFS is not supported! */
678
679 /*
680 * Invalidate all cached meta-data.
681 */
682 devvp = hfsmp->hfs_devvp;
683 if (buf_invalidateblks(devvp, 0, 0, 0))
684 panic("hfs_reload: dirty1");
685
686 args.hfsmp = hfsmp;
687 args.error = 0;
688 /*
689 * hfs_reload_callback will be called for each vnode
690 * hung off of this mount point that can't be recycled...
691 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
692 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
693 * properly referenced and unreferenced around the callback
694 */
695 vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);
696
697 if (args.error)
698 return (args.error);
699
700 /*
701 * Re-read VolumeHeader from disk.
702 */
703 priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
704 HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
705
706 error = (int)buf_meta_bread(hfsmp->hfs_devvp,
707 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
708 hfsmp->hfs_physical_block_size, NOCRED, &bp);
709 if (error) {
710 if (bp != NULL)
711 buf_brelse(bp);
712 return (error);
713 }
714
715 vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
716
717 /* Do a quick sanity check */
718 if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
719 SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
720 (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
721 SWAP_BE16(vhp->version) != kHFSXVersion) ||
722 SWAP_BE32(vhp->blockSize) != vcb->blockSize) {
723 buf_brelse(bp);
724 return (EIO);
725 }
726
727 vcb->vcbLsMod = to_bsd_time(SWAP_BE32(vhp->modifyDate));
728 vcb->vcbAtrb = SWAP_BE32 (vhp->attributes);
729 vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock);
730 vcb->vcbClpSiz = SWAP_BE32 (vhp->rsrcClumpSize);
731 vcb->vcbNxtCNID = SWAP_BE32 (vhp->nextCatalogID);
732 vcb->vcbVolBkUp = to_bsd_time(SWAP_BE32(vhp->backupDate));
733 vcb->vcbWrCnt = SWAP_BE32 (vhp->writeCount);
734 vcb->vcbFilCnt = SWAP_BE32 (vhp->fileCount);
735 vcb->vcbDirCnt = SWAP_BE32 (vhp->folderCount);
736 HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
737 vcb->totalBlocks = SWAP_BE32 (vhp->totalBlocks);
738 vcb->freeBlocks = SWAP_BE32 (vhp->freeBlocks);
739 vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap);
740 bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
741 vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */
742
743 /*
744 * Re-load meta-file vnode data (extent info, file size, etc).
745 */
746 forkp = VTOF((struct vnode *)vcb->extentsRefNum);
747 for (i = 0; i < kHFSPlusExtentDensity; i++) {
748 forkp->ff_extents[i].startBlock =
749 SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
750 forkp->ff_extents[i].blockCount =
751 SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
752 }
753 forkp->ff_size = SWAP_BE64 (vhp->extentsFile.logicalSize);
754 forkp->ff_blocks = SWAP_BE32 (vhp->extentsFile.totalBlocks);
755 forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);
756
757
758 forkp = VTOF((struct vnode *)vcb->catalogRefNum);
759 for (i = 0; i < kHFSPlusExtentDensity; i++) {
760 forkp->ff_extents[i].startBlock =
761 SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
762 forkp->ff_extents[i].blockCount =
763 SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
764 }
765 forkp->ff_size = SWAP_BE64 (vhp->catalogFile.logicalSize);
766 forkp->ff_blocks = SWAP_BE32 (vhp->catalogFile.totalBlocks);
767 forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);
768
769 if (hfsmp->hfs_attribute_vp) {
770 forkp = VTOF(hfsmp->hfs_attribute_vp);
771 for (i = 0; i < kHFSPlusExtentDensity; i++) {
772 forkp->ff_extents[i].startBlock =
773 SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
774 forkp->ff_extents[i].blockCount =
775 SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
776 }
777 forkp->ff_size = SWAP_BE64 (vhp->attributesFile.logicalSize);
778 forkp->ff_blocks = SWAP_BE32 (vhp->attributesFile.totalBlocks);
779 forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);
780 }
781
782 forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
783 for (i = 0; i < kHFSPlusExtentDensity; i++) {
784 forkp->ff_extents[i].startBlock =
785 SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
786 forkp->ff_extents[i].blockCount =
787 SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
788 }
789 forkp->ff_size = SWAP_BE64 (vhp->allocationFile.logicalSize);
790 forkp->ff_blocks = SWAP_BE32 (vhp->allocationFile.totalBlocks);
791 forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);
792
793 buf_brelse(bp);
794 vhp = NULL;
795
796 /*
797 * Re-load B-tree header data
798 */
799 forkp = VTOF((struct vnode *)vcb->extentsRefNum);
800 if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
801 return (error);
802
803 forkp = VTOF((struct vnode *)vcb->catalogRefNum);
804 if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
805 return (error);
806
807 if (hfsmp->hfs_attribute_vp) {
808 forkp = VTOF(hfsmp->hfs_attribute_vp);
809 if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
810 return (error);
811 }
812
813 /* Reload the volume name */
814 if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, &cndesc, NULL, NULL)))
815 return (error);
816 vcb->volumeNameEncodingHint = cndesc.cd_encoding;
817 bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
818 cat_releasedesc(&cndesc);
819
820 /* Re-establish private/hidden directories. */
821 hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
822 hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
823
824 /* In case any volume information changed to trigger a notification */
825 hfs_generate_volume_notifications(hfsmp);
826
827 return (0);
828 }
829
830
831 /*
832 * Common code for mount and mountroot
833 */
834 static int
835 hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
836 int journal_replay_only, vfs_context_t context)
837 {
838 struct proc *p = vfs_context_proc(context);
839 int retval = E_NONE;
840 struct hfsmount *hfsmp;
841 struct buf *bp;
842 dev_t dev;
843 HFSMasterDirectoryBlock *mdbp;
844 int ronly;
845 #if QUOTA
846 int i;
847 #endif
848 int mntwrapper;
849 kauth_cred_t cred;
850 u_int64_t disksize;
851 daddr64_t log_blkcnt;
852 u_int32_t log_blksize;
853 u_int32_t phys_blksize;
854 u_int32_t minblksize;
855 u_int32_t iswritable;
856 daddr64_t mdb_offset;
857 int isvirtual = 0;
858
859 ronly = vfs_isrdonly(mp);
860 dev = vnode_specrdev(devvp);
861 cred = p ? vfs_context_ucred(context) : NOCRED;
862 mntwrapper = 0;
863
864 bp = NULL;
865 hfsmp = NULL;
866 mdbp = NULL;
867 minblksize = kHFSBlockSize;
868
869 /* Advisory locking should be handled at the VFS layer */
870 vfs_setlocklocal(mp);
871
872 /* Get the logical block size (treated as physical block size everywhere) */
873 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
874 retval = ENXIO;
875 goto error_exit;
876 }
877 /* Get the physical block size. */
878 retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
879 if (retval) {
880 if ((retval != ENOTSUP) && (retval != ENOTTY)) {
881 retval = ENXIO;
882 goto error_exit;
883 }
884 /* If device does not support this ioctl, assume that physical
885 * block size is same as logical block size
886 */
887 phys_blksize = log_blksize;
888 }
889 /* Switch to 512 byte sectors (temporarily) */
890 if (log_blksize > 512) {
891 u_int32_t size512 = 512;
892
893 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
894 retval = ENXIO;
895 goto error_exit;
896 }
897 }
898 /* Get the number of 512 byte physical blocks. */
899 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
900 /* resetting block size may fail if getting block count did */
901 (void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
902
903 retval = ENXIO;
904 goto error_exit;
905 }
906 /* Compute an accurate disk size (i.e. within 512 bytes) */
907 disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;
908
909 /*
910 * On Tiger it is not necessary to switch the device
911 * block size to be 4k if there are more than 31-bits
912          * worth of blocks but to ensure compatibility with
913 * pre-Tiger systems we have to do it.
914 *
915 * If the device size is not a multiple of 4K (8 * 512), then
916 * switching the logical block size isn't going to help because
917 * we will be unable to write the alternate volume header.
918 * In this case, just leave the logical block size unchanged.
919 */
920 if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
921 minblksize = log_blksize = 4096;
922 if (phys_blksize < log_blksize)
923 phys_blksize = log_blksize;
924 }
925
926 /* Now switch to our preferred physical block size. */
927 if (log_blksize > 512) {
928 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
929 retval = ENXIO;
930 goto error_exit;
931 }
932 /* Get the count of physical blocks. */
933 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
934 retval = ENXIO;
935 goto error_exit;
936 }
937 }
938 /*
939 * At this point:
940 * minblksize is the minimum physical block size
941 * log_blksize has our preferred physical block size
942 * log_blkcnt has the total number of physical blocks
943 */
944
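        /* The master directory block / volume header lives at byte offset 1024; read the physical block that contains it. */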
945 mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize);
946 if ((retval = (int)buf_meta_bread(devvp,
947 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)),
948 phys_blksize, cred, &bp))) {
949 goto error_exit;
950 }
951 MALLOC(mdbp, HFSMasterDirectoryBlock *, kMDBSize, M_TEMP, M_WAITOK);
952 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize);
953 buf_brelse(bp);
954 bp = NULL;
955
956 MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK);
957 bzero(hfsmp, sizeof(struct hfsmount));
958
959 /*
960 * Init the volume information structure
961 */
962
963 lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr);
964 lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr);
965 lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr);
966 lck_rw_init(&hfsmp->hfs_insync, hfs_rwlock_group, hfs_lock_attr);
967
968 vfs_setfsprivate(mp, hfsmp);
969 hfsmp->hfs_mp = mp; /* Make VFSTOHFS work */
970 hfsmp->hfs_raw_dev = vnode_specrdev(devvp);
971 hfsmp->hfs_devvp = devvp;
972 vnode_ref(devvp); /* Hold a ref on the device, dropped when hfsmp is freed. */
973 hfsmp->hfs_logical_block_size = log_blksize;
974 hfsmp->hfs_logical_block_count = log_blkcnt;
975 hfsmp->hfs_physical_block_size = phys_blksize;
976 hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize);
977 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
978 if (ronly)
979 hfsmp->hfs_flags |= HFS_READ_ONLY;
980 if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS)
981 hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
982
983 #if QUOTA
984 for (i = 0; i < MAXQUOTAS; i++)
985 dqfileinit(&hfsmp->hfs_qfiles[i]);
986 #endif
987
988 if (args) {
989 hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid;
990 if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID;
991 hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? UNKNOWNGID : args->hfs_gid;
992 if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID;
993 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
994 if (args->hfs_mask != (mode_t)VNOVAL) {
995 hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
996 if (args->flags & HFSFSMNT_NOXONFILES) {
997 hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
998 } else {
999 hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
1000 }
1001 } else {
1002 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1003 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1004 }
1005 if ((args->flags != (int)VNOVAL) && (args->flags & HFSFSMNT_WRAPPER))
1006 mntwrapper = 1;
1007 } else {
1008 /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1009 if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) {
1010 hfsmp->hfs_uid = UNKNOWNUID;
1011 hfsmp->hfs_gid = UNKNOWNGID;
1012 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
1013 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1014 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1015 }
1016 }
1017
1018 /* Find out if disk media is writable. */
1019 if (VNOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, context) == 0) {
1020 if (iswritable)
1021 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1022 else
1023 hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1024 }
1025
1026 // record the current time at which we're mounting this volume
1027 struct timeval tv;
1028 microtime(&tv);
1029 hfsmp->hfs_mount_time = tv.tv_sec;
1030
1031 /* Mount a standard HFS disk */
1032 if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) &&
1033 (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) {
1034
1035 /* If only journal replay is requested, exit immediately */
1036 if (journal_replay_only) {
1037 retval = 0;
1038 goto error_exit;
1039 }
1040
1041 if ((vfs_flags(mp) & MNT_ROOTFS)) {
1042 retval = EINVAL; /* Cannot root from HFS standard disks */
1043 goto error_exit;
1044 }
1045 /* HFS disks can only use 512 byte physical blocks */
1046 if (log_blksize > kHFSBlockSize) {
1047 log_blksize = kHFSBlockSize;
1048 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1049 retval = ENXIO;
1050 goto error_exit;
1051 }
1052 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1053 retval = ENXIO;
1054 goto error_exit;
1055 }
1056 hfsmp->hfs_logical_block_size = log_blksize;
1057 hfsmp->hfs_logical_block_count = log_blkcnt;
1058 hfsmp->hfs_physical_block_size = log_blksize;
1059 hfsmp->hfs_log_per_phys = 1;
1060 }
1061 if (args) {
1062 hfsmp->hfs_encoding = args->hfs_encoding;
1063 HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding;
1064
1065 /* establish the timezone */
1066 gTimeZone = args->hfs_timezone;
1067 }
1068
1069 retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode,
1070 &hfsmp->hfs_get_hfsname);
1071 if (retval)
1072 goto error_exit;
1073
1074 retval = hfs_MountHFSVolume(hfsmp, mdbp, p);
1075 if (retval)
1076 (void) hfs_relconverter(hfsmp->hfs_encoding);
1077
1078 } else /* Mount an HFS Plus disk */ {
1079 HFSPlusVolumeHeader *vhp;
1080 off_t embeddedOffset;
1081 int jnl_disable = 0;
1082
1083 /* Get the embedded Volume Header */
1084 if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
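                /* A wrapped volume stores the HFS Plus data inside an extent of the wrapper; convert that extent's start into a byte offset from the beginning of the device. */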
1085 embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize;
1086 embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) *
1087 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1088
1089 /*
1090 * If the embedded volume doesn't start on a block
1091 * boundary, then switch the device to a 512-byte
1092 * block size so everything will line up on a block
1093 * boundary.
1094 */
1095 if ((embeddedOffset % log_blksize) != 0) {
1096 printf("HFS Mount: embedded volume offset not"
1097 " a multiple of physical block size (%d);"
1098 " switching to 512\n", log_blksize);
1099 log_blksize = 512;
1100 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE,
1101 (caddr_t)&log_blksize, FWRITE, context)) {
1102 retval = ENXIO;
1103 goto error_exit;
1104 }
1105 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT,
1106 (caddr_t)&log_blkcnt, 0, context)) {
1107 retval = ENXIO;
1108 goto error_exit;
1109 }
1110 /* Note: relative block count adjustment */
1111 hfsmp->hfs_logical_block_count *=
1112 hfsmp->hfs_logical_block_size / log_blksize;
1113 hfsmp->hfs_logical_block_size = log_blksize;
1114
1115 /* Update logical/physical block size */
1116 hfsmp->hfs_physical_block_size = log_blksize;
1117 phys_blksize = log_blksize;
1118 hfsmp->hfs_log_per_phys = 1;
1119 }
1120
1121 disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) *
1122 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1123
1124 hfsmp->hfs_logical_block_count = disksize / log_blksize;
1125
1126 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1127 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1128 phys_blksize, cred, &bp);
1129 if (retval)
1130 goto error_exit;
1131 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512);
1132 buf_brelse(bp);
1133 bp = NULL;
1134 vhp = (HFSPlusVolumeHeader*) mdbp;
1135
1136 } else /* pure HFS+ */ {
1137 embeddedOffset = 0;
1138 vhp = (HFSPlusVolumeHeader*) mdbp;
1139 }
1140
1141 /*
1142 * On inconsistent disks, do not allow read-write mount
1143 * unless it is the boot volume being mounted.
1144 */
1145 if (!(vfs_flags(mp) & MNT_ROOTFS) &&
1146 (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask) &&
1147 !(hfsmp->hfs_flags & HFS_READ_ONLY)) {
1148 retval = EINVAL;
1149 goto error_exit;
1150 }
1151
1152
1153 // XXXdbg
1154 //
1155 hfsmp->jnl = NULL;
1156 hfsmp->jvp = NULL;
1157 if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) &&
1158 args->journal_disable) {
1159 jnl_disable = 1;
1160 }
1161
1162 //
1163 // We only initialize the journal here if the last person
1164 // to mount this volume was journaling aware. Otherwise
1165 // we delay journal initialization until later at the end
1166 // of hfs_MountHFSPlusVolume() because the last person who
1167 // mounted it could have messed things up behind our back
1168 // (so we need to go find the .journal file, make sure it's
1169 // the right size, re-sync up if it was moved, etc).
1170 //
1171 if ( (SWAP_BE32(vhp->lastMountedVersion) == kHFSJMountVersion)
1172 && (SWAP_BE32(vhp->attributes) & kHFSVolumeJournaledMask)
1173 && !jnl_disable) {
1174
1175 // if we're able to init the journal, mark the mount
1176 // point as journaled.
1177 //
1178 if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1179 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1180 } else {
1181 // if the journal failed to open, then set the lastMountedVersion
1182 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1183 // of just bailing out because the volume is journaled.
1184 if (!ronly) {
1185 HFSPlusVolumeHeader *jvhp;
1186
1187 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1188
1189 if (mdb_offset == 0) {
1190 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1191 }
1192
1193 bp = NULL;
1194 retval = (int)buf_meta_bread(devvp,
1195 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1196 phys_blksize, cred, &bp);
1197 if (retval == 0) {
1198 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1199
1200 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1201 printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n");
1202 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1203 buf_bwrite(bp);
1204 } else {
1205 buf_brelse(bp);
1206 }
1207 bp = NULL;
1208 } else if (bp) {
1209 buf_brelse(bp);
1210 // clear this so the error exit path won't try to use it
1211 bp = NULL;
1212 }
1213 }
1214
1215 // if this isn't the root device just bail out.
1216 // If it is the root device we just continue on
1217 // in the hopes that fsck_hfs will be able to
1218 // fix any damage that exists on the volume.
1219 if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1220 retval = EINVAL;
1221 goto error_exit;
1222 }
1223 }
1224 }
1225 // XXXdbg
1226
1227 /* Either the journal is replayed successfully, or there
1228 * was nothing to replay, or no journal exists. In any case,
1229 * return success.
1230 */
1231 if (journal_replay_only) {
1232 retval = 0;
1233 goto error_exit;
1234 }
1235
1236 (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
1237
1238 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1239 /*
1240 * If the backend didn't like our physical blocksize
1241 * then retry with physical blocksize of 512.
1242 */
1243 if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) {
1244 printf("HFS Mount: could not use physical block size "
1245 "(%d) switching to 512\n", log_blksize);
1246 log_blksize = 512;
1247 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1248 retval = ENXIO;
1249 goto error_exit;
1250 }
1251 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1252 retval = ENXIO;
1253 goto error_exit;
1254 }
1255 devvp->v_specsize = log_blksize;
1256 /* Note: relative block count adjustment (in case this is an embedded volume). */
1257 hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize;
1258 hfsmp->hfs_logical_block_size = log_blksize;
1259 hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize;
1260
1261 if (hfsmp->jnl) {
1262 // close and re-open this with the new block size
1263 journal_close(hfsmp->jnl);
1264 hfsmp->jnl = NULL;
1265 if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1266 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1267 } else {
1268 // if the journal failed to open, then set the lastMountedVersion
1269 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1270 // of just bailing out because the volume is journaled.
1271 if (!ronly) {
1272 HFSPlusVolumeHeader *jvhp;
1273
1274 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1275
1276 if (mdb_offset == 0) {
1277 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1278 }
1279
1280 bp = NULL;
1281 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1282 phys_blksize, cred, &bp);
1283 if (retval == 0) {
1284 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1285
1286 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1287 printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n");
1288 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1289 buf_bwrite(bp);
1290 } else {
1291 buf_brelse(bp);
1292 }
1293 bp = NULL;
1294 } else if (bp) {
1295 buf_brelse(bp);
1296 // clear this so the error exit path won't try to use it
1297 bp = NULL;
1298 }
1299 }
1300
1301 // if this isn't the root device just bail out.
1302 // If it is the root device we just continue on
1303 // in the hopes that fsck_hfs will be able to
1304 // fix any damage that exists on the volume.
1305 if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1306 retval = EINVAL;
1307 goto error_exit;
1308 }
1309 }
1310 }
1311
1312 /* Try again with a smaller block size... */
1313 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1314 }
1315 if (retval)
1316 (void) hfs_relconverter(0);
1317 }
1318
1319 // save off a snapshot of the mtime from the previous mount
1320 // (for matador).
1321 hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime;
1322
1323 if ( retval ) {
1324 goto error_exit;
1325 }
1326
1327 mp->mnt_vfsstat.f_fsid.val[0] = (long)dev;
1328 mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp);
1329 vfs_setmaxsymlen(mp, 0);
1330 mp->mnt_vtable->vfc_threadsafe = TRUE;
1331 mp->mnt_vtable->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1332 #if NAMEDSTREAMS
1333 mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1334 #endif
1335 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
1336 /* Tell VFS that we support directory hard links. */
1337 mp->mnt_vtable->vfc_vfsflags |= VFC_VFSDIRLINKS;
1338 } else {
1339 /* HFS standard doesn't support extended readdir! */
1340 mp->mnt_vtable->vfc_vfsflags &= ~VFC_VFSREADDIR_EXTENDED;
1341 }
1342
1343 if (args) {
1344 /*
1345 * Set the free space warning levels for a non-root volume:
1346 *
1347 * Set the lower freespace limit (the level that will trigger a warning)
1348 * to 5% of the volume size or 250MB, whichever is less, and the desired
1349 * level (which will cancel the alert request) to 1/2 above that limit.
1350 * Start looking for free space to drop below this level and generate a
1351 * warning immediately if needed:
1352 */
1353 hfsmp->hfs_freespace_notify_warninglimit =
1354 MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1355 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION);
1356 hfsmp->hfs_freespace_notify_desiredlevel =
1357 MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1358 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION);
1359 } else {
1360 /*
1361 * Set the free space warning levels for the root volume:
1362 *
1363 * Set the lower freespace limit (the level that will trigger a warning)
1364 * to 1% of the volume size or 50MB, whichever is less, and the desired
1365 * level (which will cancel the alert request) to 2% or 75MB, whichever is less.
1366 */
1367 hfsmp->hfs_freespace_notify_warninglimit =
1368 MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1369 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION);
1370 hfsmp->hfs_freespace_notify_desiredlevel =
1371 MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1372 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION);
1373 };
1374
1375 /* Check if the file system exists on virtual device, like disk image */
1376 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) {
1377 if (isvirtual) {
1378 hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE;
1379 }
1380 }
1381
1382 /*
1383 * Start looking for free space to drop below this level and generate a
1384 * warning immediately if needed:
1385 */
1386 hfsmp->hfs_notification_conditions = 0;
1387 hfs_generate_volume_notifications(hfsmp);
1388
1389 if (ronly == 0) {
1390 (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
1391 }
1392 FREE(mdbp, M_TEMP);
1393 return (0);
1394
1395 error_exit:
1396 if (bp)
1397 buf_brelse(bp);
1398 if (mdbp)
1399 FREE(mdbp, M_TEMP);
1400
1401 if (hfsmp && hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
1402 (void)VNOP_CLOSE(hfsmp->jvp, ronly ? FREAD : FREAD|FWRITE, context);
1403 hfsmp->jvp = NULL;
1404 }
1405 if (hfsmp) {
1406 if (hfsmp->hfs_devvp) {
1407 vnode_rele(hfsmp->hfs_devvp);
1408 }
1409 FREE(hfsmp, M_HFSMNT);
1410 vfs_setfsprivate(mp, NULL);
1411 }
1412 return (retval);
1413 }
1414
1415
1416 /*
1417 * Make a filesystem operational.
1418 * Nothing to do at the moment.
1419 */
1420 /* ARGSUSED */
1421 static int
1422 hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context)
1423 {
1424 return (0);
1425 }
1426
1427
1428 /*
1429 * unmount system call
1430 */
1431 static int
1432 hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
1433 {
1434 struct proc *p = vfs_context_proc(context);
1435 struct hfsmount *hfsmp = VFSTOHFS(mp);
1436 int retval = E_NONE;
1437 int flags;
1438 int force;
1439 int started_tr = 0;
1440
1441 flags = 0;
1442 force = 0;
1443 if (mntflags & MNT_FORCE) {
1444 flags |= FORCECLOSE;
1445 force = 1;
1446 }
1447
1448 if ((retval = hfs_flushfiles(mp, flags, p)) && !force)
1449 return (retval);
1450
1451 if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
1452 (void) hfs_recording_suspend(hfsmp);
1453
1454 /*
1455 * Flush out the b-trees, volume bitmap and Volume Header
1456 */
1457 if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
1458 retval = hfs_start_transaction(hfsmp);
1459 if (retval == 0) {
1460 started_tr = 1;
1461 } else if (!force) {
1462 goto err_exit;
1463 }
1464
1465 if (hfsmp->hfs_startup_vp) {
1466 (void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK);
1467 retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p);
1468 hfs_unlock(VTOC(hfsmp->hfs_startup_vp));
1469 if (retval && !force)
1470 goto err_exit;
1471 }
1472
1473 if (hfsmp->hfs_attribute_vp) {
1474 (void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK);
1475 retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p);
1476 hfs_unlock(VTOC(hfsmp->hfs_attribute_vp));
1477 if (retval && !force)
1478 goto err_exit;
1479 }
1480
1481 (void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK);
1482 retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p);
1483 hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
1484 if (retval && !force)
1485 goto err_exit;
1486
1487 (void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK);
1488 retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p);
1489 hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
1490 if (retval && !force)
1491 goto err_exit;
1492
1493 if (hfsmp->hfs_allocation_vp) {
1494 (void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK);
1495 retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p);
1496 hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
1497 if (retval && !force)
1498 goto err_exit;
1499 }
1500
1501 if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) {
1502 retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p);
1503 if (retval && !force)
1504 goto err_exit;
1505 }
1506
1507 /* If runtime corruption was detected, indicate that the volume
1508 * was not unmounted cleanly.
1509 */
1510 if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
1511 HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
1512 } else {
1513 HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask;
1514 }
1515
1516 retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
1517 if (retval) {
1518 HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
1519 if (!force)
1520 goto err_exit; /* could not flush everything */
1521 }
1522
1523 if (started_tr) {
1524 hfs_end_transaction(hfsmp);
1525 started_tr = 0;
1526 }
1527 }
1528
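        /* Flush any remaining journal transactions to disk before releasing the metadata vnodes below. */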
1529 if (hfsmp->jnl) {
1530 journal_flush(hfsmp->jnl);
1531 }
1532
1533 /*
1534 * Invalidate our caches and release metadata vnodes
1535 */
1536 (void) hfsUnmount(hfsmp, p);
1537
1538 /*
1539 * Last chance to dump unreferenced system files.
1540 */
1541 (void) vflush(mp, NULLVP, FORCECLOSE);
1542
1543 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
1544 (void) hfs_relconverter(hfsmp->hfs_encoding);
1545
1546 // XXXdbg
1547 if (hfsmp->jnl) {
1548 journal_close(hfsmp->jnl);
1549 hfsmp->jnl = NULL;
1550 }
1551
1552 VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
1553
1554 if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
1555 retval = VNOP_CLOSE(hfsmp->jvp,
1556 hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE,
1557 context);
1558 vnode_put(hfsmp->jvp);
1559 hfsmp->jvp = NULL;
1560 }
1561 // XXXdbg
1562
1563 #ifdef HFS_SPARSE_DEV
1564 /* Drop our reference on the backing fs (if any). */
1565 if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) {
1566 struct vnode * tmpvp;
1567
1568 hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
1569 tmpvp = hfsmp->hfs_backingfs_rootvp;
1570 hfsmp->hfs_backingfs_rootvp = NULLVP;
1571 vnode_rele(tmpvp);
1572 }
1573 #endif /* HFS_SPARSE_DEV */
1574 lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group);
1575 vnode_rele(hfsmp->hfs_devvp);
1576 FREE(hfsmp, M_HFSMNT);
1577
1578 return (0);
1579
1580 err_exit:
1581 if (started_tr) {
1582 hfs_end_transaction(hfsmp);
1583 }
1584 return retval;
1585 }
1586
1587
1588 /*
1589 * Return the root of a filesystem.
1590 */
1591 static int
1592 hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context)
1593 {
1594 return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1);
1595 }
1596
1597
1598 /*
1599 * Do operations associated with quotas
1600 */
1601 #if !QUOTA
1602 static int
1603 hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context)
1604 {
1605 return (ENOTSUP);
1606 }
1607 #else
1608 static int
1609 hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context)
1610 {
1611 struct proc *p = vfs_context_proc(context);
1612 int cmd, type, error;
1613
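        /* A uid of ~0 means "the caller"; substitute the calling process's real uid. */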
1614 if (uid == ~0U)
1615 uid = vfs_context_ucred(context)->cr_ruid;
1616 cmd = cmds >> SUBCMDSHIFT;
1617
1618 switch (cmd) {
1619 case Q_SYNC:
1620 case Q_QUOTASTAT:
1621 break;
1622 case Q_GETQUOTA:
1623 if (uid == vfs_context_ucred(context)->cr_ruid)
1624 break;
1625 /* fall through */
1626 default:
1627 if ( (error = vfs_context_suser(context)) )
1628 return (error);
1629 }
1630
1631 type = cmds & SUBCMDMASK;
1632 if ((u_int)type >= MAXQUOTAS)
1633 return (EINVAL);
1634 if (vfs_busy(mp, LK_NOWAIT))
1635 return (0);
1636
1637 switch (cmd) {
1638
1639 case Q_QUOTAON:
1640 error = hfs_quotaon(p, mp, type, datap);
1641 break;
1642
1643 case Q_QUOTAOFF:
1644 error = hfs_quotaoff(p, mp, type);
1645 break;
1646
1647 case Q_SETQUOTA:
1648 error = hfs_setquota(mp, uid, type, datap);
1649 break;
1650
1651 case Q_SETUSE:
1652 error = hfs_setuse(mp, uid, type, datap);
1653 break;
1654
1655 case Q_GETQUOTA:
1656 error = hfs_getquota(mp, uid, type, datap);
1657 break;
1658
1659 case Q_SYNC:
1660 error = hfs_qsync(mp);
1661 break;
1662
1663 case Q_QUOTASTAT:
1664 error = hfs_quotastat(mp, type, datap);
1665 break;
1666
1667 default:
1668 error = EINVAL;
1669 break;
1670 }
1671 vfs_unbusy(mp);
1672
1673 return (error);
1674 }
1675 #endif /* QUOTA */
1676
1677 /* Subtype is composite of bits */
1678 #define HFS_SUBTYPE_JOURNALED 0x01
1679 #define HFS_SUBTYPE_CASESENSITIVE 0x02
1680 /* bits 2 - 6 reserved */
1681 #define HFS_SUBTYPE_STANDARDHFS 0x80
1682
1683 /*
1684 * Get file system statistics.
1685 */
1686 static int
1687 hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context)
1688 {
1689 ExtendedVCB *vcb = VFSTOVCB(mp);
1690 struct hfsmount *hfsmp = VFSTOHFS(mp);
1691 u_long freeCNIDs;
1692 u_int16_t subtype = 0;
1693
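        /* Catalog node IDs come out of a 32-bit space and vcbNxtCNID is the next unused one, so the difference approximates how many IDs remain. */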
1694 freeCNIDs = (u_long)0xFFFFFFFF - (u_long)vcb->vcbNxtCNID;
1695
1696 sbp->f_bsize = (u_int32_t)vcb->blockSize;
1697 sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
1698 sbp->f_blocks = (u_int64_t)((unsigned long)vcb->totalBlocks);
1699 sbp->f_bfree = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 0));
1700 sbp->f_bavail = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 1));
1701 sbp->f_files = (u_int64_t)((unsigned long )(vcb->totalBlocks - 2)); /* max files is constrained by total blocks */
1702 sbp->f_ffree = (u_int64_t)((unsigned long )(MIN(freeCNIDs, sbp->f_bavail)));
1703
1704 /*
1705 * Subtypes (flavors) for HFS
1706 * 0: Mac OS Extended
1707 * 1: Mac OS Extended (Journaled)
1708 * 2: Mac OS Extended (Case Sensitive)
1709 * 3: Mac OS Extended (Case Sensitive, Journaled)
1710 * 4 - 127: Reserved
1711 * 128: Mac OS Standard
1712 *
1713 */
1714 if (hfsmp->hfs_flags & HFS_STANDARD) {
1715 subtype = HFS_SUBTYPE_STANDARDHFS;
1716 } else /* HFS Plus */ {
1717 if (hfsmp->jnl)
1718 subtype |= HFS_SUBTYPE_JOURNALED;
1719 if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)
1720 subtype |= HFS_SUBTYPE_CASESENSITIVE;
1721 }
1722 sbp->f_fssubtype = subtype;
1723
1724 return (0);
1725 }
1726
1727
1728 //
1729 // XXXdbg -- this is a callback to be used by the journal to
1730 // get meta data blocks flushed out to disk.
1731 //
1732 // XXXdbg -- be smarter and don't flush *every* block on each
1733 // call. try to only flush some so we don't wind up
1734 // being too synchronous.
1735 //
1736 __private_extern__
1737 void
1738 hfs_sync_metadata(void *arg)
1739 {
1740 struct mount *mp = (struct mount *)arg;
1741 struct hfsmount *hfsmp;
1742 ExtendedVCB *vcb;
1743 buf_t bp;
1744 int retval;
1745 daddr64_t priIDSector;
1746 hfsmp = VFSTOHFS(mp);
1747 vcb = HFSTOVCB(hfsmp);
1748
1749 // now make sure the super block is flushed
1750 priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
1751 HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
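// The HFS Plus volume header lives at a fixed 1024-byte offset from the
// start of the volume; HFS_PRI_SECTOR() converts that to a logical block
// number, and hfsPlusIOPosOffset accounts for volumes embedded in an HFS
// wrapper (it is zero for pure HFS Plus volumes).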
1752
1753 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
1754 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
1755 hfsmp->hfs_physical_block_size, NOCRED, &bp);
1756 if ((retval != 0 ) && (retval != ENXIO)) {
1757 printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
1758 (int)priIDSector, retval);
1759 }
1760
1761 if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
1762 buf_bwrite(bp);
1763 } else if (bp) {
1764 buf_brelse(bp);
1765 }
1766
1767 // the alternate super block...
1768 // XXXdbg - we probably don't need to do this each and every time.
1769 // hfs_btreeio.c:FlushAlternate() should flag when it was
1770 // written...
1771 if (hfsmp->hfs_alt_id_sector) {
1772 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
1773 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys),
1774 hfsmp->hfs_physical_block_size, NOCRED, &bp);
1775 if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
1776 buf_bwrite(bp);
1777 } else if (bp) {
1778 buf_brelse(bp);
1779 }
1780 }
1781 }
1782
1783
1784 struct hfs_sync_cargs {
1785 kauth_cred_t cred;
1786 struct proc *p;
1787 int waitfor;
1788 int error;
1789 };
1790
1791
1792 static int
1793 hfs_sync_callback(struct vnode *vp, void *cargs)
1794 {
1795 struct cnode *cp;
1796 struct hfs_sync_cargs *args;
1797 int error;
1798
1799 args = (struct hfs_sync_cargs *)cargs;
1800
1801 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
1802 return (VNODE_RETURNED);
1803 }
1804 cp = VTOC(vp);
1805
1806 if ((cp->c_flag & C_MODIFIED) ||
1807 (cp->c_touch_acctime | cp->c_touch_chgtime | cp->c_touch_modtime) ||
1808 vnode_hasdirtyblks(vp)) {
1809 error = hfs_fsync(vp, args->waitfor, 0, args->p);
1810
1811 if (error)
1812 args->error = error;
1813 }
1814 hfs_unlock(cp);
1815 return (VNODE_RETURNED);
1816 }
1817
1818
1819
1820 /*
1821 * Go through the disk queues to initiate sandbagged IO;
1822 * go through the inodes to write those that have been modified;
1823 * initiate the writing of the super block if it has been modified.
1824 *
1825 * Note: we are always called with the filesystem marked `MPBUSY'.
1826 */
1827 static int
1828 hfs_sync(struct mount *mp, int waitfor, vfs_context_t context)
1829 {
1830 struct proc *p = vfs_context_proc(context);
1831 struct cnode *cp;
1832 struct hfsmount *hfsmp;
1833 ExtendedVCB *vcb;
1834 struct vnode *meta_vp[4];
1835 int i;
1836 int error, allerror = 0;
1837 struct hfs_sync_cargs args;
1838
1839 hfsmp = VFSTOHFS(mp);
1840
1841 /*
1842 * hfs_changefs might be manipulating vnodes so back off
1843 */
1844 if (hfsmp->hfs_flags & HFS_IN_CHANGEFS)
1845 return (0);
1846
1847 if (hfsmp->hfs_flags & HFS_READ_ONLY)
1848 return (EROFS);
1849
1850 /* skip over frozen volumes */
1851 if (!lck_rw_try_lock_shared(&hfsmp->hfs_insync))
1852 return 0;
1853
1854 args.cred = kauth_cred_get();
1855 args.waitfor = waitfor;
1856 args.p = p;
1857 args.error = 0;
1858 /*
1859 * hfs_sync_callback will be called for each vnode
1860 * hung off of this mount point... the vnode will be
1861 * properly referenced and unreferenced around the callback
1862 */
1863 vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args);
1864
1865 if (args.error)
1866 allerror = args.error;
1867
1868 vcb = HFSTOVCB(hfsmp);
1869
1870 meta_vp[0] = vcb->extentsRefNum;
1871 meta_vp[1] = vcb->catalogRefNum;
1872 meta_vp[2] = vcb->allocationsRefNum; /* This is NULL for standard HFS */
1873 meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */
1874
1875 /* Now sync our four metadata files */
1876 for (i = 0; i < 4; ++i) {
1877 struct vnode *btvp;
1878
1879 btvp = meta_vp[i];
1880 if ((btvp==0) || (vnode_mount(btvp) != mp))
1881 continue;
1882
1883 /* XXX use hfs_systemfile_lock instead ? */
1884 (void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK);
1885 cp = VTOC(btvp);
1886
1887 if (((cp->c_flag & C_MODIFIED) == 0) &&
1888 (cp->c_touch_acctime == 0) &&
1889 (cp->c_touch_chgtime == 0) &&
1890 (cp->c_touch_modtime == 0) &&
1891 vnode_hasdirtyblks(btvp) == 0) {
1892 hfs_unlock(VTOC(btvp));
1893 continue;
1894 }
1895 error = vnode_get(btvp);
1896 if (error) {
1897 hfs_unlock(VTOC(btvp));
1898 continue;
1899 }
1900 if ((error = hfs_fsync(btvp, waitfor, 0, p)))
1901 allerror = error;
1902
1903 hfs_unlock(cp);
1904 vnode_put(btvp);
1905 }
1906
1907 /*
1908 * Force stale file system control information to be flushed.
1909 */
1910 if (vcb->vcbSigWord == kHFSSigWord) {
1911 if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) {
1912 allerror = error;
1913 }
1914 }
1915 #if QUOTA
1916 hfs_qsync(mp);
1917 #endif /* QUOTA */
1918
1919 hfs_hotfilesync(hfsmp, vfs_context_kernel());
1920
1921 /*
1922 * Write back modified superblock.
1923 */
1924 if (IsVCBDirty(vcb)) {
1925 error = hfs_flushvolumeheader(hfsmp, waitfor, 0);
1926 if (error)
1927 allerror = error;
1928 }
1929
1930 if (hfsmp->jnl) {
1931 journal_flush(hfsmp->jnl);
1932 }
1933
1934 lck_rw_unlock_shared(&hfsmp->hfs_insync);
1935 return (allerror);
1936 }
1937
1938
1939 /*
1940 * File handle to vnode
1941 *
1942 * Have to be really careful about stale file handles:
1943 * - check that the cnode id is valid
1944 * - call hfs_vget() to get the locked cnode
1945 * - check for an unallocated cnode (i_mode == 0)
1946 * - check that the given client host has export rights and return
1947 * those rights via exflagsp and credanonp
1948 */
1949 static int
1950 hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context)
1951 {
1952 struct hfsfid *hfsfhp;
1953 struct vnode *nvp;
1954 int result;
1955
1956 *vpp = NULL;
1957 hfsfhp = (struct hfsfid *)fhp;
1958
1959 if (fhlen < (int)sizeof(struct hfsfid))
1960 return (EINVAL);
1961
1962 result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0);
1963 if (result) {
1964 if (result == ENOENT)
1965 result = ESTALE;
1966 return result;
1967 }
1968
1969 /* The createtime can be changed by hfs_setattr or hfs_setattrlist.
1970 * For NFS, we assume that only a createtime later than the one in
1971 * the file handle means the fileID was reused in that session by
1972 * wrapping. We don't have a volume ID or other unique identifier to
1973 * use here as a generation ID that survives reboots, crashes where
1974 * metadata noting lastFileID didn't make it to disk but the client
1975 * still has it, or volume erasures where fileIDs start over again.
1976 * Lastly, now that HFS allows fileIDs to "wrap", this becomes more
1977 * error prone. In the future, the "wrap bit" could be changed to a
1978 * unique wrap number and used as the generation number. For now do this.
1979 */
1980 if (((time_t)(ntohl(hfsfhp->hfsfid_gen)) < VTOC(nvp)->c_itime)) {
1981 hfs_unlock(VTOC(nvp));
1982 vnode_put(nvp);
1983 return (ESTALE);
1984 }
1985 *vpp = nvp;
1986
1987 hfs_unlock(VTOC(nvp));
1988 return (0);
1989 }
1990
1991
1992 /*
1993 * Vnode pointer to File handle
1994 */
1995 /* ARGSUSED */
1996 static int
1997 hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context)
1998 {
1999 struct cnode *cp;
2000 struct hfsfid *hfsfhp;
2001
2002 if (ISHFS(VTOVCB(vp)))
2003 return (ENOTSUP); /* hfs standard is not exportable */
2004
2005 if (*fhlenp < (int)sizeof(struct hfsfid))
2006 return (EOVERFLOW);
2007
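/*
 * The exported file handle is simply the catalog node ID plus the
 * cnode create time as a generation number, both stored in network
 * byte order; hfs_fhtovp() above uses the create time to detect
 * stale handles after a file ID has been reused.
 */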
2008 cp = VTOC(vp);
2009 hfsfhp = (struct hfsfid *)fhp;
2010 hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
2011 hfsfhp->hfsfid_gen = htonl(cp->c_itime);
2012 *fhlenp = sizeof(struct hfsfid);
2013
2014 return (0);
2015 }
2016
2017
2018 /*
2019 * Initialize HFS filesystems; done only once.
2020 */
2021 static int
2022 hfs_init(__unused struct vfsconf *vfsp)
2023 {
2024 static int done = 0;
2025
2026 if (done)
2027 return (0);
2028 done = 1;
2029 hfs_chashinit();
2030 hfs_converterinit();
2031
2032 BTReserveSetup();
2033
2034
2035 hfs_lock_attr = lck_attr_alloc_init();
2036 hfs_group_attr = lck_grp_attr_alloc_init();
2037 hfs_mutex_group = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
2038 hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
2039
2040
2041 return (0);
2042 }
2043
2044 static int
2045 hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
2046 {
2047 struct hfsmount * hfsmp;
2048 char fstypename[MFSNAMELEN];
2049
2050 if (vp == NULL)
2051 return (EINVAL);
2052
2053 if (!vnode_isvroot(vp))
2054 return (EINVAL);
2055
2056 vnode_vfsname(vp, fstypename);
2057 if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0)
2058 return (EINVAL);
2059
2060 hfsmp = VTOHFS(vp);
2061
2062 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
2063 return (EINVAL);
2064
2065 *hfsmpp = hfsmp;
2066
2067 return (0);
2068 }
2069
2070 // XXXdbg
2071 #include <sys/filedesc.h>
2072
2073 /*
2074 * HFS filesystem related variables.
2075 */
2076 static int
2077 hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
2078 user_addr_t newp, size_t newlen, vfs_context_t context)
2079 {
2080 struct proc *p = vfs_context_proc(context);
2081 int error;
2082 struct hfsmount *hfsmp;
2083
2084 /* all sysctl names at this level are terminal */
2085
2086 if (name[0] == HFS_ENCODINGBIAS) {
2087 int bias;
2088
2089 bias = hfs_getencodingbias();
2090 error = sysctl_int(oldp, oldlenp, newp, newlen, &bias);
2091 if (error == 0 && newp)
2092 hfs_setencodingbias(bias);
2093 return (error);
2094
2095 } else if (name[0] == HFS_EXTEND_FS) {
2096 u_int64_t newsize;
2097 vnode_t vp = vfs_context_cwd(context);
2098
2099 if (newp == USER_ADDR_NULL || vp == NULLVP)
2100 return (EINVAL);
2101 if ((error = hfs_getmountpoint(vp, &hfsmp)))
2102 return (error);
2103 error = sysctl_quad(oldp, oldlenp, newp, newlen, (quad_t *)&newsize);
2104 if (error)
2105 return (error);
2106
2107 error = hfs_extendfs(hfsmp, newsize, context);
2108 return (error);
2109
2110 } else if (name[0] == HFS_ENCODINGHINT) {
2111 size_t bufsize;
2112 size_t bytes;
2113 u_int32_t hint;
2114 u_int16_t *unicode_name;
2115 char *filename;
2116
2117 if ((newlen <= 0) || (newlen > MAXPATHLEN))
2118 return (EINVAL);
2119
2120 bufsize = MAX(newlen * 3, MAXPATHLEN);
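/*
 * utf8_decodestr() below produces a decomposed UTF-16 string, which can
 * be longer than the UTF-8 input (decomposition may split one code point
 * into several), so a buffer of roughly three times the input length,
 * and at least MAXPATHLEN, is used as headroom.
 */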
2121 MALLOC(filename, char *, newlen, M_TEMP, M_WAITOK);
2122 MALLOC(unicode_name, u_int16_t *, bufsize, M_TEMP, M_WAITOK);
2123
2124 error = copyin(newp, (caddr_t)filename, newlen);
2125 if (error == 0) {
2126 error = utf8_decodestr((u_int8_t *)filename, newlen - 1, unicode_name,
2127 &bytes, bufsize, 0, UTF_DECOMPOSED);
2128 if (error == 0) {
2129 hint = hfs_pickencoding(unicode_name, bytes / 2);
2130 error = sysctl_int(oldp, oldlenp, USER_ADDR_NULL, 0, (int32_t *)&hint);
2131 }
2132 }
2133 FREE(unicode_name, M_TEMP);
2134 FREE(filename, M_TEMP);
2135 return (error);
2136
2137 } else if (name[0] == HFS_ENABLE_JOURNALING) {
2138 // make the file system journaled...
2139 vnode_t vp = vfs_context_cwd(context);
2140 vnode_t jvp;
2141 ExtendedVCB *vcb;
2142 struct cat_attr jnl_attr, jinfo_attr;
2143 struct cat_fork jnl_fork, jinfo_fork;
2144 void *jnl = NULL;
2145 int lockflags;
2146
2147 /* Only root can enable journaling */
2148 if (!is_suser()) {
2149 return (EPERM);
2150 }
2151 if (vp == NULLVP)
2152 return EINVAL;
2153
2154 hfsmp = VTOHFS(vp);
2155 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2156 return EROFS;
2157 }
2158 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
2159 printf("hfs: can't make a plain hfs volume journaled.\n");
2160 return EINVAL;
2161 }
2162
2163 if (hfsmp->jnl) {
2164 printf("hfs: volume @ mp %p is already journaled!\n", vnode_mount(vp));
2165 return EAGAIN;
2166 }
2167
2168 vcb = HFSTOVCB(hfsmp);
2169 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);
2170 if (BTHasContiguousNodes(VTOF(vcb->catalogRefNum)) == 0 ||
2171 BTHasContiguousNodes(VTOF(vcb->extentsRefNum)) == 0) {
2172
2173 printf("hfs: volume has a btree w/non-contiguous nodes. can not enable journaling.\n");
2174 hfs_systemfile_unlock(hfsmp, lockflags);
2175 return EINVAL;
2176 }
2177 hfs_systemfile_unlock(hfsmp, lockflags);
2178
2179 // make sure these both exist!
2180 if ( GetFileInfo(vcb, kHFSRootFolderID, ".journal_info_block", &jinfo_attr, &jinfo_fork) == 0
2181 || GetFileInfo(vcb, kHFSRootFolderID, ".journal", &jnl_attr, &jnl_fork) == 0) {
2182
2183 return EINVAL;
2184 }
2185
2186 hfs_sync(hfsmp->hfs_mp, MNT_WAIT, context);
2187
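// For HFS_ENABLE_JOURNALING the caller passes the journal layout in the
// sysctl name vector: name[1] is the allocation block of the journal info
// block, name[2] is the starting allocation block of the journal file,
// and name[3] is the journal size in bytes (see how they are used below).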
2188 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2189 (off_t)name[2], (off_t)name[3]);
2190
2191 jvp = hfsmp->hfs_devvp;
2192 jnl = journal_create(jvp,
2193 (off_t)name[2] * (off_t)HFSTOVCB(hfsmp)->blockSize
2194 + HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
2195 (off_t)((unsigned)name[3]),
2196 hfsmp->hfs_devvp,
2197 hfsmp->hfs_logical_block_size,
2198 0,
2199 0,
2200 hfs_sync_metadata, hfsmp->hfs_mp);
2201
2202 if (jnl == NULL) {
2203 printf("hfs: FAILED to create the journal!\n");
2204 if (jvp && jvp != hfsmp->hfs_devvp) {
2205 VNOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, context);
2206 }
2207 jvp = NULL;
2208
2209 return EINVAL;
2210 }
2211
2212 hfs_global_exclusive_lock_acquire(hfsmp);
2213
2214 /*
2215 * Flush all dirty metadata buffers.
2216 */
2217 buf_flushdirtyblks(hfsmp->hfs_devvp, MNT_WAIT, 0, "hfs_sysctl");
2218 buf_flushdirtyblks(hfsmp->hfs_extents_vp, MNT_WAIT, 0, "hfs_sysctl");
2219 buf_flushdirtyblks(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, "hfs_sysctl");
2220 buf_flushdirtyblks(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, "hfs_sysctl");
2221 if (hfsmp->hfs_attribute_vp)
2222 buf_flushdirtyblks(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, "hfs_sysctl");
2223
2224 HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1];
2225 HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask;
2226 hfsmp->jvp = jvp;
2227 hfsmp->jnl = jnl;
2228
2229 // save this off for the hack-y check in hfs_remove()
2230 hfsmp->jnl_start = (u_int32_t)name[2];
2231 hfsmp->jnl_size = (off_t)((unsigned)name[3]);
2232 hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid;
2233 hfsmp->hfs_jnlfileid = jnl_attr.ca_fileid;
2234
2235 vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
2236
2237 hfs_global_exclusive_lock_release(hfsmp);
2238 hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
2239
2240 return 0;
2241 } else if (name[0] == HFS_DISABLE_JOURNALING) {
2242 // clear the journaling bit
2243 vnode_t vp = vfs_context_cwd(context);
2244
2245 /* Only root can disable journaling */
2246 if (!is_suser()) {
2247 return (EPERM);
2248 }
2249 if (vp == NULLVP)
2250 return EINVAL;
2251
2252 hfsmp = VTOHFS(vp);
2253
2254 /*
2255 * Disabling journaling is disallowed on volumes with directory hard links
2256 * because we have not tested the relevant code path.
2257 */
2258 if (hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries != 0){
2259 printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
2260 return EPERM;
2261 }
2262
2263 printf("hfs: disabling journaling for mount @ %p\n", vnode_mount(vp));
2264
2265 hfs_global_exclusive_lock_acquire(hfsmp);
2266
2267 // Lights out for you buddy!
2268 journal_close(hfsmp->jnl);
2269 hfsmp->jnl = NULL;
2270
2271 if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
2272 VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, context);
2273 }
2274 hfsmp->jvp = NULL;
2275 vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
2276 hfsmp->jnl_start = 0;
2277 hfsmp->hfs_jnlinfoblkid = 0;
2278 hfsmp->hfs_jnlfileid = 0;
2279
2280 HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask;
2281
2282 hfs_global_exclusive_lock_release(hfsmp);
2283 hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
2284
2285 return 0;
2286 } else if (name[0] == HFS_GET_JOURNAL_INFO) {
2287 vnode_t vp = vfs_context_cwd(context);
2288 off_t jnl_start, jnl_size;
2289
2290 if (vp == NULLVP)
2291 return EINVAL;
2292
2293 hfsmp = VTOHFS(vp);
2294 if (hfsmp->jnl == NULL) {
2295 jnl_start = 0;
2296 jnl_size = 0;
2297 } else {
2298 jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset;
2299 jnl_size = (off_t)hfsmp->jnl_size;
2300 }
2301
2302 if ((error = copyout((caddr_t)&jnl_start, CAST_USER_ADDR_T(name[1]), sizeof(off_t))) != 0) {
2303 return error;
2304 }
2305 if ((error = copyout((caddr_t)&jnl_size, CAST_USER_ADDR_T(name[2]), sizeof(off_t))) != 0) {
2306 return error;
2307 }
2308
2309 return 0;
2310 } else if (name[0] == HFS_SET_PKG_EXTENSIONS) {
2311
2312 return set_package_extensions_table((void *)name[1], name[2], name[3]);
2313
2314 } else if (name[0] == VFS_CTL_QUERY) {
2315 struct sysctl_req *req;
2316 struct vfsidctl vc;
2317 struct user_vfsidctl user_vc;
2318 struct mount *mp;
2319 struct vfsquery vq;
2320 boolean_t is_64_bit;
2321
2322 is_64_bit = proc_is64bit(p);
2323 req = CAST_DOWN(struct sysctl_req *, oldp); /* we're new style vfs sysctl. */
2324
2325 if (is_64_bit) {
2326 error = SYSCTL_IN(req, &user_vc, sizeof(user_vc));
2327 if (error) return (error);
2328
2329 mp = vfs_getvfs(&user_vc.vc_fsid);
2330 }
2331 else {
2332 error = SYSCTL_IN(req, &vc, sizeof(vc));
2333 if (error) return (error);
2334
2335 mp = vfs_getvfs(&vc.vc_fsid);
2336 }
2337 if (mp == NULL) return (ENOENT);
2338
2339 hfsmp = VFSTOHFS(mp);
2340 bzero(&vq, sizeof(vq));
2341 vq.vq_flags = hfsmp->hfs_notification_conditions;
2342 return SYSCTL_OUT(req, &vq, sizeof(vq));
2343 } else if (name[0] == HFS_REPLAY_JOURNAL) {
2344 char *devnode = NULL;
2345 size_t devnode_len;
2346
2347 devnode_len = *oldlenp;
2348 MALLOC(devnode, char *, devnode_len + 1, M_TEMP, M_WAITOK);
2349 if (devnode == NULL) {
2350 return ENOMEM;
2351 }
2352
2353 error = copyin(oldp, (caddr_t)devnode, devnode_len);
2354 if (error) {
2355 FREE(devnode, M_TEMP);
2356 return error;
2357 }
2358 devnode[devnode_len] = 0;
2359
2360 error = hfs_journal_replay(devnode, context);
2361 FREE(devnode, M_TEMP);
2362 return error;
2363 }
2364
2365 return (ENOTSUP);
2366 }
2367
2368 /* hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support the
2369 * build_path ioctl. We use it to leverage the code below that updates the origin
2370 * cache if necessary.
2371 */
2372 int
2373 hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
2374 {
2375 int error;
2376 int lockflags;
2377 struct hfsmount *hfsmp;
2378
2379 hfsmp = VFSTOHFS(mp);
2380
2381 error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1);
2382 if (error)
2383 return (error);
2384
2385 /*
2386 * ADLs may need to have their origin state updated
2387 * since build_path needs a valid parent. The same is true
2388 * for hardlinked files as well. There isn't a race window here in re-acquiring
2389 * the cnode lock since we aren't pulling any data out of the cnode; instead, we're
2390 * going back to the catalog.
2391 */
2392 if ((VTOC(*vpp)->c_flag & C_HARDLINK) &&
2393 (hfs_lock(VTOC(*vpp), HFS_EXCLUSIVE_LOCK) == 0)) {
2394 cnode_t *cp = VTOC(*vpp);
2395 struct cat_desc cdesc;
2396
2397 if (!hfs_haslinkorigin(cp)) {
2398 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2399 error = cat_findname(hfsmp, (cnid_t)ino, &cdesc);
2400 hfs_systemfile_unlock(hfsmp, lockflags);
2401 if (error == 0) {
2402 if ((cdesc.cd_parentcnid !=
2403 hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
2404 (cdesc.cd_parentcnid !=
2405 hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid)) {
2406 hfs_savelinkorigin(cp, cdesc.cd_parentcnid);
2407 }
2408 cat_releasedesc(&cdesc);
2409 }
2410 }
2411 hfs_unlock(cp);
2412 }
2413 return (0);
2414 }
2415
2416
2417 /*
2418 * Look up an HFS object by ID.
2419 *
2420 * The object is returned with an iocount reference and the cnode locked.
2421 *
2422 * If the object is a file then it will represent the data fork.
2423 */
2424 __private_extern__
2425 int
2426 hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock)
2427 {
2428 struct vnode *vp = NULLVP;
2429 struct cat_desc cndesc;
2430 struct cat_attr cnattr;
2431 struct cat_fork cnfork;
2432 u_int32_t linkref = 0;
2433 int error;
2434
2435 /* Check for cnids that shouldn't be exported. */
2436 if ((cnid < kHFSFirstUserCatalogNodeID) &&
2437 (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) {
2438 return (ENOENT);
2439 }
2440 /* Don't export our private directories. */
2441 if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
2442 cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
2443 return (ENOENT);
2444 }
2445 /*
2446 * Check the hash first
2447 */
2448 vp = hfs_chash_getvnode(hfsmp->hfs_raw_dev, cnid, 0, skiplock);
2449 if (vp) {
2450 *vpp = vp;
2451 return(0);
2452 }
2453
2454 bzero(&cndesc, sizeof(cndesc));
2455 bzero(&cnattr, sizeof(cnattr));
2456 bzero(&cnfork, sizeof(cnfork));
2457
2458 /*
2459 * Not in hash, lookup in catalog
2460 */
2461 if (cnid == kHFSRootParentID) {
2462 static char hfs_rootname[] = "/";
2463
2464 cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0];
2465 cndesc.cd_namelen = 1;
2466 cndesc.cd_parentcnid = kHFSRootParentID;
2467 cndesc.cd_cnid = kHFSRootFolderID;
2468 cndesc.cd_flags = CD_ISDIR;
2469
2470 cnattr.ca_fileid = kHFSRootFolderID;
2471 cnattr.ca_linkcount = 1;
2472 cnattr.ca_entries = 1;
2473 cnattr.ca_dircount = 1;
2474 cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
2475 } else {
2476 int lockflags;
2477 cnid_t pid;
2478 const char *nameptr;
2479
2480 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2481 error = cat_idlookup(hfsmp, cnid, 0, &cndesc, &cnattr, &cnfork);
2482 hfs_systemfile_unlock(hfsmp, lockflags);
2483
2484 if (error) {
2485 *vpp = NULL;
2486 return (error);
2487 }
2488
2489 /*
2490 * Check for a raw hardlink inode and save its linkref.
2491 */
2492 pid = cndesc.cd_parentcnid;
2493 nameptr = (const char *)cndesc.cd_nameptr;
2494
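/*
 * Raw hardlink inodes live in the private metadata directories and are
 * named with a fixed prefix followed by the decimal link reference
 * number; strtoul() below recovers that linkref from the name.  Files
 * pending deletion use their own prefix and are not exported.
 */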
2495 if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
2496 (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) {
2497 linkref = strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10);
2498
2499 } else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
2500 (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) {
2501 linkref = strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10);
2502
2503 } else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
2504 (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) {
2505 *vpp = NULL;
2506 cat_releasedesc(&cndesc);
2507 return (ENOENT); /* open unlinked file */
2508 }
2509 }
2510
2511 /*
2512 * Finish initializing cnode descriptor for hardlinks.
2513 *
2514 * We need a valid name and parent for reverse lookups.
2515 */
2516 if (linkref) {
2517 cnid_t nextlinkid;
2518 cnid_t prevlinkid;
2519 struct cat_desc linkdesc;
2520 int lockflags;
2521
2522 cnattr.ca_linkref = linkref;
2523
2524 /*
2525 * Pick up the first link in the chain and get a descriptor for it.
2526 * This allows blind volfs paths to work for hardlinks.
2527 */
2528 if ((hfs_lookuplink(hfsmp, linkref, &prevlinkid, &nextlinkid) == 0) &&
2529 (nextlinkid != 0)) {
2530 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2531 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
2532 hfs_systemfile_unlock(hfsmp, lockflags);
2533 if (error == 0) {
2534 cat_releasedesc(&cndesc);
2535 bcopy(&linkdesc, &cndesc, sizeof(linkdesc));
2536 }
2537 }
2538 }
2539
2540 if (linkref) {
2541 error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cnfork, &vp);
2542 if (error == 0) {
2543 VTOC(vp)->c_flag |= C_HARDLINK;
2544 vnode_setmultipath(vp);
2545 }
2546 } else {
2547 struct componentname cn;
2548
2549 /* Supply hfs_getnewvnode with a component name. */
2550 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
2551 cn.cn_nameiop = LOOKUP;
2552 cn.cn_flags = ISLASTCN | HASBUF;
2553 cn.cn_context = NULL;
2554 cn.cn_pnlen = MAXPATHLEN;
2555 cn.cn_nameptr = cn.cn_pnbuf;
2556 cn.cn_namelen = cndesc.cd_namelen;
2557 cn.cn_hash = 0;
2558 cn.cn_consume = 0;
2559 bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1);
2560
2561 error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr, &cnfork, &vp);
2562
2563 if ((error == 0) && (VTOC(vp)->c_flag & C_HARDLINK)) {
2564 hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid);
2565 }
2566 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
2567 }
2568 cat_releasedesc(&cndesc);
2569
2570 *vpp = vp;
2571 if (vp && skiplock) {
2572 hfs_unlock(VTOC(vp));
2573 }
2574 return (error);
2575 }
2576
2577
2578 /*
2579 * Flush out all the files in a filesystem.
2580 */
2581 static int
2582 #if QUOTA
2583 hfs_flushfiles(struct mount *mp, int flags, struct proc *p)
2584 #else
2585 hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p)
2586 #endif /* QUOTA */
2587 {
2588 struct hfsmount *hfsmp;
2589 struct vnode *skipvp = NULLVP;
2590 int error;
2591 #if QUOTA
2592 int quotafilecnt;
2593 int i;
2594 #endif
2595
2596 hfsmp = VFSTOHFS(mp);
2597
2598 #if QUOTA
2599 /*
2600 * The open quota files have an indirect reference on
2601 * the root directory vnode. We must account for this
2602 * extra reference when doing the initial vflush.
2603 */
2604 quotafilecnt = 0;
2605 if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
2606
2607 /* Find out how many quota files we have open. */
2608 for (i = 0; i < MAXQUOTAS; i++) {
2609 if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP)
2610 ++quotafilecnt;
2611 }
2612
2613 /* Obtain the root vnode so we can skip over it. */
2614 skipvp = hfs_chash_getvnode(hfsmp->hfs_raw_dev, kHFSRootFolderID, 0, 0);
2615 }
2616 #endif /* QUOTA */
2617
2618 error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags);
2619 if (error != 0)
2620 return(error);
2621
2622 error = vflush(mp, skipvp, SKIPSYSTEM | flags);
2623
2624 #if QUOTA
2625 if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
2626 if (skipvp) {
2627 /*
2628 * See if there are additional references on the
2629 * root vp besides the ones obtained from the open
2630 * quota files and the hfs_chash_getvnode call above.
2631 */
2632 if ((error == 0) &&
2633 (vnode_isinuse(skipvp, quotafilecnt))) {
2634 error = EBUSY; /* root directory is still open */
2635 }
2636 hfs_unlock(VTOC(skipvp));
2637 vnode_put(skipvp);
2638 }
2639 if (error && (flags & FORCECLOSE) == 0)
2640 return (error);
2641
2642 for (i = 0; i < MAXQUOTAS; i++) {
2643 if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP)
2644 continue;
2645 hfs_quotaoff(p, mp, i);
2646 }
2647 error = vflush(mp, NULLVP, SKIPSYSTEM | flags);
2648 }
2649 #endif /* QUOTA */
2650
2651 return (error);
2652 }
2653
2654 /*
2655 * Update volume encoding bitmap (HFS Plus only)
2656 */
2657 __private_extern__
2658 void
2659 hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
2660 {
2661 #define kIndexMacUkrainian 48 /* MacUkrainian encoding is 152 */
2662 #define kIndexMacFarsi 49 /* MacFarsi encoding is 140 */
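/*
 * The volume header's encodingsBitmap is only 64 bits wide, so encodings
 * whose numeric values exceed 63 (MacUkrainian is 152, MacFarsi is 140)
 * are folded onto reserved bit positions 48 and 49 below.
 */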
2663
2664 u_int32_t index;
2665
2666 switch (encoding) {
2667 case kTextEncodingMacUkrainian:
2668 index = kIndexMacUkrainian;
2669 break;
2670 case kTextEncodingMacFarsi:
2671 index = kIndexMacFarsi;
2672 break;
2673 default:
2674 index = encoding;
2675 break;
2676 }
2677
2678 if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
2679 HFS_MOUNT_LOCK(hfsmp, TRUE);
2680 hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
2681 MarkVCBDirty(hfsmp);
2682 HFS_MOUNT_UNLOCK(hfsmp, TRUE);
2683 }
2684 }
2685
2686 /*
2687 * Update volume stats
2688 *
2689 * On journal volumes this will cause a volume header flush
2690 */
2691 __private_extern__
2692 int
2693 hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
2694 {
2695 struct timeval tv;
2696
2697 microtime(&tv);
2698
2699 lck_mtx_lock(&hfsmp->hfs_mutex);
2700
2701 MarkVCBDirty(hfsmp);
2702 hfsmp->hfs_mtime = tv.tv_sec;
2703
2704 switch (op) {
2705 case VOL_UPDATE:
2706 break;
2707 case VOL_MKDIR:
2708 if (hfsmp->hfs_dircount != 0xFFFFFFFF)
2709 ++hfsmp->hfs_dircount;
2710 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
2711 ++hfsmp->vcbNmRtDirs;
2712 break;
2713 case VOL_RMDIR:
2714 if (hfsmp->hfs_dircount != 0)
2715 --hfsmp->hfs_dircount;
2716 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
2717 --hfsmp->vcbNmRtDirs;
2718 break;
2719 case VOL_MKFILE:
2720 if (hfsmp->hfs_filecount != 0xFFFFFFFF)
2721 ++hfsmp->hfs_filecount;
2722 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
2723 ++hfsmp->vcbNmFls;
2724 break;
2725 case VOL_RMFILE:
2726 if (hfsmp->hfs_filecount != 0)
2727 --hfsmp->hfs_filecount;
2728 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
2729 --hfsmp->vcbNmFls;
2730 break;
2731 }
2732
2733 lck_mtx_unlock(&hfsmp->hfs_mutex);
2734
2735 if (hfsmp->jnl) {
2736 hfs_flushvolumeheader(hfsmp, 0, 0);
2737 }
2738
2739 return (0);
2740 }
2741
2742
2743 static int
2744 hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush)
2745 {
2746 ExtendedVCB *vcb = HFSTOVCB(hfsmp);
2747 struct filefork *fp;
2748 HFSMasterDirectoryBlock *mdb;
2749 struct buf *bp = NULL;
2750 int retval;
2751 int sectorsize;
2752 ByteCount namelen;
2753
2754 sectorsize = hfsmp->hfs_logical_block_size;
2755 retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sectorsize), sectorsize, NOCRED, &bp);
2756 if (retval) {
2757 if (bp)
2758 buf_brelse(bp);
2759 return retval;
2760 }
2761
2762 lck_mtx_lock(&hfsmp->hfs_mutex);
2763
2764 mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sectorsize));
2765
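/*
 * Classic HFS keeps its on-disk dates in local time and all multi-byte
 * fields big-endian, hence the UTCToLocal()/SWAP_BE* conversions below.
 */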
2766 mdb->drCrDate = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbCrDate)));
2767 mdb->drLsMod = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod)));
2768 mdb->drAtrb = SWAP_BE16 (vcb->vcbAtrb);
2769 mdb->drNmFls = SWAP_BE16 (vcb->vcbNmFls);
2770 mdb->drAllocPtr = SWAP_BE16 (vcb->nextAllocation);
2771 mdb->drClpSiz = SWAP_BE32 (vcb->vcbClpSiz);
2772 mdb->drNxtCNID = SWAP_BE32 (vcb->vcbNxtCNID);
2773 mdb->drFreeBks = SWAP_BE16 (vcb->freeBlocks);
2774
2775 namelen = strlen((char *)vcb->vcbVN);
2776 retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN);
2777 /* Retry with MacRoman in case that's how it was exported. */
2778 if (retval)
2779 retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN);
2780
2781 mdb->drVolBkUp = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbVolBkUp)));
2782 mdb->drWrCnt = SWAP_BE32 (vcb->vcbWrCnt);
2783 mdb->drNmRtDirs = SWAP_BE16 (vcb->vcbNmRtDirs);
2784 mdb->drFilCnt = SWAP_BE32 (vcb->vcbFilCnt);
2785 mdb->drDirCnt = SWAP_BE32 (vcb->vcbDirCnt);
2786
2787 bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo));
2788
2789 fp = VTOF(vcb->extentsRefNum);
2790 mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
2791 mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
2792 mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
2793 mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
2794 mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
2795 mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
2796 mdb->drXTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
2797 mdb->drXTClpSiz = SWAP_BE32 (fp->ff_clumpsize);
2798 FTOC(fp)->c_flag &= ~C_MODIFIED;
2799
2800 fp = VTOF(vcb->catalogRefNum);
2801 mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
2802 mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
2803 mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
2804 mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
2805 mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
2806 mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
2807 mdb->drCTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
2808 mdb->drCTClpSiz = SWAP_BE32 (fp->ff_clumpsize);
2809 FTOC(fp)->c_flag &= ~C_MODIFIED;
2810
2811 MarkVCBClean( vcb );
2812
2813 lck_mtx_unlock(&hfsmp->hfs_mutex);
2814
2815 /* If requested, flush out the alternate MDB */
2816 if (altflush) {
2817 struct buf *alt_bp = NULL;
2818
2819 if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_alt_id_sector, sectorsize, NOCRED, &alt_bp) == 0) {
2820 bcopy(mdb, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sectorsize), kMDBSize);
2821
2822 (void) VNOP_BWRITE(alt_bp);
2823 } else if (alt_bp)
2824 buf_brelse(alt_bp);
2825 }
2826
2827 if (waitfor != MNT_WAIT)
2828 buf_bawrite(bp);
2829 else
2830 retval = VNOP_BWRITE(bp);
2831
2832 return (retval);
2833 }
2834
2835 /*
2836 * Flush any dirty in-memory mount data to the on-disk
2837 * volume header.
2838 *
2839 * Note: the on-disk volume signature is intentionally
2840 * not flushed since the on-disk "H+" and "HX" signatures
2841 * are always stored in-memory as "H+".
2842 */
2843 __private_extern__
2844 int
2845 hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush)
2846 {
2847 ExtendedVCB *vcb = HFSTOVCB(hfsmp);
2848 struct filefork *fp;
2849 HFSPlusVolumeHeader *volumeHeader;
2850 int retval;
2851 struct buf *bp;
2852 int i;
2853 daddr64_t priIDSector;
2854 int critical;
2855 u_int16_t signature;
2856 u_int16_t hfsversion;
2857
2858 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2859 return(0);
2860 }
2861 if (hfsmp->hfs_flags & HFS_STANDARD) {
2862 return hfs_flushMDB(hfsmp, waitfor, altflush);
2863 }
2864 critical = altflush;
2865 priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
2866 HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
2867
2868 if (hfs_start_transaction(hfsmp) != 0) {
2869 return EINVAL;
2870 }
2871
2872 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
2873 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
2874 hfsmp->hfs_physical_block_size, NOCRED, &bp);
2875 if (retval) {
2876 if (bp)
2877 buf_brelse(bp);
2878
2879 hfs_end_transaction(hfsmp);
2880
2881 printf("HFS: err %d reading VH blk (%s)\n", retval, vcb->vcbVN);
2882 return (retval);
2883 }
2884
2885 if (hfsmp->jnl) {
2886 journal_modify_block_start(hfsmp->jnl, bp);
2887 }
2888
2889 volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) +
2890 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
2891
2892 /*
2893 * Sanity check what we just read.
2894 */
2895 signature = SWAP_BE16 (volumeHeader->signature);
2896 hfsversion = SWAP_BE16 (volumeHeader->version);
2897 if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
2898 (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
2899 (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) {
2900 #if 1
2901 panic("HFS: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d",
2902 vcb->vcbVN, signature, hfsversion,
2903 SWAP_BE32 (volumeHeader->blockSize));
2904 #endif
2905 printf("HFS: corrupt VH blk (%s)\n", vcb->vcbVN);
2906 buf_brelse(bp);
2907 return (EIO);
2908 }
2909
2910 /*
2911 * For embedded HFS+ volumes, update create date if it changed
2912 * (ie from a setattrlist call)
2913 */
2914 if ((vcb->hfsPlusIOPosOffset != 0) &&
2915 (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) {
2916 struct buf *bp2;
2917 HFSMasterDirectoryBlock *mdb;
2918
2919 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
2920 HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys),
2921 hfsmp->hfs_physical_block_size, NOCRED, &bp2);
2922 if (retval) {
2923 if (bp2)
2924 buf_brelse(bp2);
2925 retval = 0;
2926 } else {
2927 mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) +
2928 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
2929
2930 if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate )
2931 {
2932 if (hfsmp->jnl) {
2933 journal_modify_block_start(hfsmp->jnl, bp2);
2934 }
2935
2936 mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate); /* pick up the new create date */
2937
2938 if (hfsmp->jnl) {
2939 journal_modify_block_end(hfsmp->jnl, bp2, NULL, NULL);
2940 } else {
2941 (void) VNOP_BWRITE(bp2); /* write out the changes */
2942 }
2943 }
2944 else
2945 {
2946 buf_brelse(bp2); /* just release it */
2947 }
2948 }
2949 }
2950
2951 lck_mtx_lock(&hfsmp->hfs_mutex);
2952
2953 /* Note: only update the lower 16 bits worth of attributes */
2954 volumeHeader->attributes = SWAP_BE32 (vcb->vcbAtrb);
2955 volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock);
2956 if (hfsmp->jnl) {
2957 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion);
2958 } else {
2959 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion);
2960 }
2961 volumeHeader->createDate = SWAP_BE32 (vcb->localCreateDate); /* volume create date is in local time */
2962 volumeHeader->modifyDate = SWAP_BE32 (to_hfs_time(vcb->vcbLsMod));
2963 volumeHeader->backupDate = SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp));
2964 volumeHeader->fileCount = SWAP_BE32 (vcb->vcbFilCnt);
2965 volumeHeader->folderCount = SWAP_BE32 (vcb->vcbDirCnt);
2966 volumeHeader->totalBlocks = SWAP_BE32 (vcb->totalBlocks);
2967 volumeHeader->freeBlocks = SWAP_BE32 (vcb->freeBlocks);
2968 volumeHeader->nextAllocation = SWAP_BE32 (vcb->nextAllocation);
2969 volumeHeader->rsrcClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
2970 volumeHeader->dataClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
2971 volumeHeader->nextCatalogID = SWAP_BE32 (vcb->vcbNxtCNID);
2972 volumeHeader->writeCount = SWAP_BE32 (vcb->vcbWrCnt);
2973 volumeHeader->encodingsBitmap = SWAP_BE64 (vcb->encodingsBitmap);
2974
2975 if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) {
2976 bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo));
2977 critical = 1;
2978 }
2979
2980 /*
2981 * System files are only dirty when altflush is set.
2982 */
2983 if (altflush == 0) {
2984 goto done;
2985 }
2986
2987 /* Sync Extents over-flow file meta data */
2988 fp = VTOF(vcb->extentsRefNum);
2989 if (FTOC(fp)->c_flag & C_MODIFIED) {
2990 for (i = 0; i < kHFSPlusExtentDensity; i++) {
2991 volumeHeader->extentsFile.extents[i].startBlock =
2992 SWAP_BE32 (fp->ff_extents[i].startBlock);
2993 volumeHeader->extentsFile.extents[i].blockCount =
2994 SWAP_BE32 (fp->ff_extents[i].blockCount);
2995 }
2996 volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size);
2997 volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
2998 volumeHeader->extentsFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
2999 FTOC(fp)->c_flag &= ~C_MODIFIED;
3000 }
3001
3002 /* Sync Catalog file meta data */
3003 fp = VTOF(vcb->catalogRefNum);
3004 if (FTOC(fp)->c_flag & C_MODIFIED) {
3005 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3006 volumeHeader->catalogFile.extents[i].startBlock =
3007 SWAP_BE32 (fp->ff_extents[i].startBlock);
3008 volumeHeader->catalogFile.extents[i].blockCount =
3009 SWAP_BE32 (fp->ff_extents[i].blockCount);
3010 }
3011 volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size);
3012 volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3013 volumeHeader->catalogFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3014 FTOC(fp)->c_flag &= ~C_MODIFIED;
3015 }
3016
3017 /* Sync Allocation file meta data */
3018 fp = VTOF(vcb->allocationsRefNum);
3019 if (FTOC(fp)->c_flag & C_MODIFIED) {
3020 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3021 volumeHeader->allocationFile.extents[i].startBlock =
3022 SWAP_BE32 (fp->ff_extents[i].startBlock);
3023 volumeHeader->allocationFile.extents[i].blockCount =
3024 SWAP_BE32 (fp->ff_extents[i].blockCount);
3025 }
3026 volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size);
3027 volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3028 volumeHeader->allocationFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3029 FTOC(fp)->c_flag &= ~C_MODIFIED;
3030 }
3031
3032 /* Sync Attribute file meta data */
3033 if (hfsmp->hfs_attribute_vp) {
3034 fp = VTOF(hfsmp->hfs_attribute_vp);
3035 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3036 volumeHeader->attributesFile.extents[i].startBlock =
3037 SWAP_BE32 (fp->ff_extents[i].startBlock);
3038 volumeHeader->attributesFile.extents[i].blockCount =
3039 SWAP_BE32 (fp->ff_extents[i].blockCount);
3040 }
3041 FTOC(fp)->c_flag &= ~C_MODIFIED;
3042 volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size);
3043 volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3044 volumeHeader->attributesFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3045 }
3046
3047 /* Sync Startup file meta data */
3048 if (hfsmp->hfs_startup_vp) {
3049 fp = VTOF(hfsmp->hfs_startup_vp);
3050 if (FTOC(fp)->c_flag & C_MODIFIED) {
3051 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3052 volumeHeader->startupFile.extents[i].startBlock =
3053 SWAP_BE32 (fp->ff_extents[i].startBlock);
3054 volumeHeader->startupFile.extents[i].blockCount =
3055 SWAP_BE32 (fp->ff_extents[i].blockCount);
3056 }
3057 volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size);
3058 volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3059 volumeHeader->startupFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3060 FTOC(fp)->c_flag &= ~C_MODIFIED;
3061 }
3062 }
3063
3064 done:
3065 MarkVCBClean(hfsmp);
3066 lck_mtx_unlock(&hfsmp->hfs_mutex);
3067
3068 /* If requested, flush out the alternate volume header */
3069 if (altflush && hfsmp->hfs_alt_id_sector) {
3070 struct buf *alt_bp = NULL;
3071
3072 if (buf_meta_bread(hfsmp->hfs_devvp,
3073 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys),
3074 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
3075 if (hfsmp->jnl) {
3076 journal_modify_block_start(hfsmp->jnl, alt_bp);
3077 }
3078
3079 bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
3080 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
3081 kMDBSize);
3082
3083 if (hfsmp->jnl) {
3084 journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
3085 } else {
3086 (void) VNOP_BWRITE(alt_bp);
3087 }
3088 } else if (alt_bp)
3089 buf_brelse(alt_bp);
3090 }
3091
3092 if (hfsmp->jnl) {
3093 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
3094 } else {
3095 if (waitfor != MNT_WAIT)
3096 buf_bawrite(bp);
3097 else {
3098 retval = VNOP_BWRITE(bp);
3099 /* When critical data changes, flush the device cache */
3100 if (critical && (retval == 0)) {
3101 (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE,
3102 NULL, FWRITE, NULL);
3103 }
3104 }
3105 }
3106 hfs_end_transaction(hfsmp);
3107
3108 return (retval);
3109 }
3110
3111
3112 /*
3113 * Extend a file system.
3114 */
3115 __private_extern__
3116 int
3117 hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
3118 {
3119 struct proc *p = vfs_context_proc(context);
3120 kauth_cred_t cred = vfs_context_ucred(context);
3121 struct vnode *vp;
3122 struct vnode *devvp;
3123 struct buf *bp;
3124 struct filefork *fp = NULL;
3125 ExtendedVCB *vcb;
3126 struct cat_fork forkdata;
3127 u_int64_t oldsize;
3128 u_int64_t newblkcnt;
3129 u_int64_t prev_phys_block_count;
3130 u_int32_t addblks;
3131 u_int64_t sectorcnt;
3132 u_int32_t sectorsize;
3133 u_int32_t phys_sectorsize;
3134 daddr64_t prev_alt_sector;
3135 daddr_t bitmapblks;
3136 int lockflags;
3137 int error;
3138 int64_t oldBitmapSize;
3139 Boolean usedExtendFileC = false;
3140
3141 devvp = hfsmp->hfs_devvp;
3142 vcb = HFSTOVCB(hfsmp);
3143
3144 /*
3145 * - HFS Plus file systems only.
3146 * - Journaling must be enabled.
3147 * - No embedded volumes.
3148 */
3149 if ((vcb->vcbSigWord == kHFSSigWord) ||
3150 (hfsmp->jnl == NULL) ||
3151 (vcb->hfsPlusIOPosOffset != 0)) {
3152 return (EPERM);
3153 }
3154 /*
3155 * If extending file system by non-root, then verify
3156 * ownership and check permissions.
3157 */
3158 if (suser(cred, NULL)) {
3159 error = hfs_vget(hfsmp, kHFSRootFolderID, &vp, 0);
3160
3161 if (error)
3162 return (error);
3163 error = hfs_owner_rights(hfsmp, VTOC(vp)->c_uid, cred, p, 0);
3164 if (error == 0) {
3165 error = hfs_write_access(vp, cred, p, false);
3166 }
3167 hfs_unlock(VTOC(vp));
3168 vnode_put(vp);
3169 if (error)
3170 return (error);
3171
3172 error = vnode_authorize(devvp, NULL, KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, context);
3173 if (error)
3174 return (error);
3175 }
3176 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&sectorsize, 0, context)) {
3177 return (ENXIO);
3178 }
3179 if (sectorsize != hfsmp->hfs_logical_block_size) {
3180 return (ENXIO);
3181 }
3182 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&sectorcnt, 0, context)) {
3183 return (ENXIO);
3184 }
3185 if ((sectorsize * sectorcnt) < newsize) {
3186 printf("hfs_extendfs: not enough space on device\n");
3187 return (ENOSPC);
3188 }
3189 error = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_sectorsize, 0, context);
3190 if (error) {
3191 if ((error != ENOTSUP) && (error != ENOTTY)) {
3192 return (ENXIO);
3193 }
3194 /* If ioctl is not supported, force physical and logical sector size to be same */
3195 phys_sectorsize = sectorsize;
3196 }
3197 oldsize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
3198
3199 /*
3200 * Validate new size.
3201 */
3202 if ((newsize <= oldsize) || (newsize % sectorsize) || (newsize % phys_sectorsize)) {
3203 printf("hfs_extendfs: invalid size\n");
3204 return (EINVAL);
3205 }
3206 newblkcnt = newsize / vcb->blockSize;
3207 if (newblkcnt > (u_int64_t)0xFFFFFFFF)
3208 return (EOVERFLOW);
3209
3210 addblks = newblkcnt - vcb->totalBlocks;
3211
3212 printf("hfs_extendfs: growing %s by %d blocks\n", vcb->vcbVN, addblks);
3213 /*
3214 * Enclose changes inside a transaction.
3215 */
3216 if (hfs_start_transaction(hfsmp) != 0) {
3217 return (EINVAL);
3218 }
3219
3220 /*
3221 * Note: we take the attributes lock in case we have an attribute data vnode
3222 * which needs to change size.
3223 */
3224 lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3225 vp = vcb->allocationsRefNum;
3226 fp = VTOF(vp);
3227 bcopy(&fp->ff_data, &forkdata, sizeof(forkdata));
3228
3229 /*
3230 * Calculate additional space required (if any) by allocation bitmap.
3231 */
3232 oldBitmapSize = fp->ff_size;
3233 bitmapblks = roundup((newblkcnt+7) / 8, vcb->vcbVBMIOSize) / vcb->blockSize;
3234 if (bitmapblks > (daddr_t)fp->ff_blocks)
3235 bitmapblks -= fp->ff_blocks;
3236 else
3237 bitmapblks = 0;
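/*
 * The arithmetic above needs one bit per allocation block, i.e.
 * (newblkcnt + 7) / 8 bytes, rounded up to the bitmap I/O size and then
 * converted to allocation blocks; only the growth beyond the bitmap's
 * current size is kept.  A hedged example, assuming a 4 KB allocation
 * block and a 4 KB vcbVBMIOSize: growing to 262,144 blocks needs
 * 32,768 bytes of bitmap, i.e. 8 allocation blocks in total.
 */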
3238
3239 if (bitmapblks > 0) {
3240 daddr64_t blkno;
3241 daddr_t blkcnt;
3242 off_t bytesAdded;
3243
3244 /*
3245 * Get the bitmap's current size (in allocation blocks) so we know
3246 * where to start zero filling once the new space is added. We've
3247 * got to do this before the bitmap is grown.
3248 */
3249 blkno = (daddr64_t)fp->ff_blocks;
3250
3251 /*
3252 * Try to grow the allocation file in the normal way, using allocation
3253 * blocks already existing in the file system. This way, we might be
3254 * able to grow the bitmap contiguously, or at least in the metadata
3255 * zone.
3256 */
3257 error = ExtendFileC(vcb, fp, bitmapblks * vcb->blockSize, 0,
3258 kEFAllMask | kEFNoClumpMask | kEFReserveMask | kEFMetadataMask,
3259 &bytesAdded);
3260
3261 if (error == 0) {
3262 usedExtendFileC = true;
3263 } else {
3264 /*
3265 * If the above allocation failed, fall back to allocating the new
3266 * extent of the bitmap from the space we're going to add. Since those
3267 * blocks don't yet belong to the file system, we have to update the
3268 * extent list directly, and manually adjust the file size.
3269 */
3270 bytesAdded = 0;
3271 error = AddFileExtent(vcb, fp, vcb->totalBlocks, bitmapblks);
3272 if (error) {
3273 printf("hfs_extendfs: error %d adding extents\n", error);
3274 goto out;
3275 }
3276 fp->ff_blocks += bitmapblks;
3277 VTOC(vp)->c_blocks = fp->ff_blocks;
3278 VTOC(vp)->c_flag |= C_MODIFIED;
3279 }
3280
3281 /*
3282 * Update the allocation file's size to include the newly allocated
3283 * blocks. Note that ExtendFileC doesn't do this, which is why this
3284 * statement is outside the above "if" statement.
3285 */
3286 fp->ff_size += (u_int64_t)bitmapblks * (u_int64_t)vcb->blockSize;
3287
3288 /*
3289 * Zero out the new bitmap blocks.
3290 */
3291 {
3292
3293 bp = NULL;
3294 blkcnt = bitmapblks;
3295 while (blkcnt > 0) {
3296 error = (int)buf_meta_bread(vp, blkno, vcb->blockSize, NOCRED, &bp);
3297 if (error) {
3298 if (bp) {
3299 buf_brelse(bp);
3300 }
3301 break;
3302 }
3303 bzero((char *)buf_dataptr(bp), vcb->blockSize);
3304 buf_markaged(bp);
3305 error = (int)buf_bwrite(bp);
3306 if (error)
3307 break;
3308 --blkcnt;
3309 ++blkno;
3310 }
3311 }
3312 if (error) {
3313 printf("hfs_extendfs: error %d clearing blocks\n", error);
3314 goto out;
3315 }
3316 /*
3317 * Mark the new bitmap space as allocated.
3318 *
3319 * Note that ExtendFileC will have marked any blocks it allocated, so
3320 * this is only needed if we used AddFileExtent. Also note that this
3321 * has to come *after* the zero filling of new blocks in the case where
3322 * we used AddFileExtent (since the part of the bitmap we're touching
3323 * is in those newly allocated blocks).
3324 */
3325 if (!usedExtendFileC) {
3326 error = BlockMarkAllocated(vcb, vcb->totalBlocks, bitmapblks);
3327 if (error) {
3328 printf("hfs_extendfs: error %d setting bitmap\n", error);
3329 goto out;
3330 }
3331 vcb->freeBlocks -= bitmapblks;
3332 }
3333 }
3334 /*
3335 * Mark the new alternate VH as allocated.
3336 */
3337 if (vcb->blockSize == 512)
3338 error = BlockMarkAllocated(vcb, vcb->totalBlocks + addblks - 2, 2);
3339 else
3340 error = BlockMarkAllocated(vcb, vcb->totalBlocks + addblks - 1, 1);
3341 if (error) {
3342 printf("hfs_extendfs: error %d setting bitmap (VH)\n", error);
3343 goto out;
3344 }
3345 /*
3346 * Mark the old alternate VH as free.
3347 */
3348 if (vcb->blockSize == 512)
3349 (void) BlockMarkFree(vcb, vcb->totalBlocks - 2, 2);
3350 else
3351 (void) BlockMarkFree(vcb, vcb->totalBlocks - 1, 1);
3352 /*
3353 * Adjust file system variables for new space.
3354 */
3355 prev_phys_block_count = hfsmp->hfs_logical_block_count;
3356 prev_alt_sector = hfsmp->hfs_alt_id_sector;
3357
3358 vcb->totalBlocks += addblks;
3359 vcb->freeBlocks += addblks;
3360 hfsmp->hfs_logical_block_count = newsize / sectorsize;
3361 hfsmp->hfs_alt_id_sector = (hfsmp->hfsPlusIOPosOffset / sectorsize) +
3362 HFS_ALT_SECTOR(sectorsize, hfsmp->hfs_logical_block_count);
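/*
 * The alternate volume header computed above always sits 1024 bytes
 * before the end of the volume; HFS_ALT_SECTOR() turns the new logical
 * block count into that sector number, and the hfsPlusIOPosOffset term
 * covers embedded volumes (zero here, since embedded volumes were
 * rejected earlier).
 */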
3363 MarkVCBDirty(vcb);
3364 error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
3365 if (error) {
3366 printf("hfs_extendfs: couldn't flush volume headers (%d)", error);
3367 /*
3368 * Restore to old state.
3369 */
3370 if (usedExtendFileC) {
3371 (void) TruncateFileC(vcb, fp, oldBitmapSize, false);
3372 } else {
3373 fp->ff_blocks -= bitmapblks;
3374 fp->ff_size -= (u_int64_t)bitmapblks * (u_int64_t)vcb->blockSize;
3375 /*
3376 * No need to mark the excess blocks free since those bitmap blocks
3377 * are no longer part of the bitmap. But we do need to undo the
3378 * effect of the "vcb->freeBlocks -= bitmapblks" above.
3379 */
3380 vcb->freeBlocks += bitmapblks;
3381 }
3382 vcb->totalBlocks -= addblks;
3383 vcb->freeBlocks -= addblks;
3384 hfsmp->hfs_logical_block_count = prev_phys_block_count;
3385 hfsmp->hfs_alt_id_sector = prev_alt_sector;
3386 MarkVCBDirty(vcb);
3387 if (vcb->blockSize == 512)
3388 (void) BlockMarkAllocated(vcb, vcb->totalBlocks - 2, 2);
3389 else
3390 (void) BlockMarkAllocated(vcb, vcb->totalBlocks - 1, 1);
3391 goto out;
3392 }
3393 /*
3394 * Invalidate the old alternate volume header.
3395 */
3396 bp = NULL;
3397 if (prev_alt_sector) {
3398 if (buf_meta_bread(hfsmp->hfs_devvp,
3399 HFS_PHYSBLK_ROUNDDOWN(prev_alt_sector, hfsmp->hfs_log_per_phys),
3400 hfsmp->hfs_physical_block_size, NOCRED, &bp) == 0) {
3401 journal_modify_block_start(hfsmp->jnl, bp);
3402
3403 bzero((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), kMDBSize);
3404
3405 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
3406 } else if (bp) {
3407 buf_brelse(bp);
3408 }
3409 }
3410
3411 /*
3412 * TODO: Adjust the size of the metadata zone based on new volume size?
3413 */
3414
3415 /*
3416 * Adjust the size of hfsmp->hfs_attrdata_vp
3417 */
3418 if (hfsmp->hfs_attrdata_vp) {
3419 struct cnode *attr_cp;
3420 struct filefork *attr_fp;
3421
3422 if (vnode_get(hfsmp->hfs_attrdata_vp) == 0) {
3423 attr_cp = VTOC(hfsmp->hfs_attrdata_vp);
3424 attr_fp = VTOF(hfsmp->hfs_attrdata_vp);
3425
3426 attr_cp->c_blocks = newblkcnt;
3427 attr_fp->ff_blocks = newblkcnt;
3428 attr_fp->ff_extents[0].blockCount = newblkcnt;
3429 attr_fp->ff_size = (off_t) newblkcnt * hfsmp->blockSize;
3430 ubc_setsize(hfsmp->hfs_attrdata_vp, attr_fp->ff_size);
3431 vnode_put(hfsmp->hfs_attrdata_vp);
3432 }
3433 }
3434
3435 out:
3436 if (error && fp) {
3437 /* Restore allocation fork. */
3438 bcopy(&forkdata, &fp->ff_data, sizeof(forkdata));
3439 VTOC(vp)->c_blocks = fp->ff_blocks;
3440
3441 }
3442 /*
3443 * Regardless of whether or not totalBlocks actually increased,
3444 * we should reset the allocLimit field. If it changed, it will
3445 * get updated; if not, it will remain the same.
3446 */
3447 hfsmp->allocLimit = vcb->totalBlocks;
3448 hfs_systemfile_unlock(hfsmp, lockflags);
3449 hfs_end_transaction(hfsmp);
3450
3451 return (error);
3452 }
3453
3454 #define HFS_MIN_SIZE (32LL * 1024LL * 1024LL)
3455
3456 /*
3457 * Truncate a file system (while still mounted).
3458 */
3459 __private_extern__
3460 int
3461 hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
3462 {
3463 struct buf *bp = NULL;
3464 u_int64_t oldsize;
3465 u_int32_t newblkcnt;
3466 u_int32_t reclaimblks = 0;
3467 int lockflags = 0;
3468 int transaction_begun = 0;
3469 int error;
3470
3471 lck_mtx_lock(&hfsmp->hfs_mutex);
3472 if (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) {
3473 lck_mtx_unlock(&hfsmp->hfs_mutex);
3474 return (EALREADY);
3475 }
3476 hfsmp->hfs_flags |= HFS_RESIZE_IN_PROGRESS;
3477 hfsmp->hfs_resize_filesmoved = 0;
3478 hfsmp->hfs_resize_totalfiles = 0;
3479 lck_mtx_unlock(&hfsmp->hfs_mutex);
3480
3481 /*
3482 * - Journaled HFS Plus volumes only.
3483 * - No embedded volumes.
3484 */
3485 if ((hfsmp->jnl == NULL) ||
3486 (hfsmp->hfsPlusIOPosOffset != 0)) {
3487 error = EPERM;
3488 goto out;
3489 }
3490 oldsize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
3491 newblkcnt = newsize / hfsmp->blockSize;
3492 reclaimblks = hfsmp->totalBlocks - newblkcnt;
3493
3494 /* Make sure new size is valid. */
3495 if ((newsize < HFS_MIN_SIZE) ||
3496 (newsize >= oldsize) ||
3497 (newsize % hfsmp->hfs_logical_block_size) ||
3498 (newsize % hfsmp->hfs_physical_block_size)) {
3499 printf ("hfs_truncatefs: invalid size\n");
3500 error = EINVAL;
3501 goto out;
3502 }
3503 /* Make sure there's enough space to work with. */
3504 if (reclaimblks >= hfs_freeblks(hfsmp, 1)) {
3505 printf("hfs_truncatefs: insufficient space (need %u blocks; have %u blocks)\n", reclaimblks, hfs_freeblks(hfsmp, 1));
3506 error = ENOSPC;
3507 goto out;
3508 }
3509
3510 /* Start with a clean journal. */
3511 journal_flush(hfsmp->jnl);
3512
3513 if (hfs_start_transaction(hfsmp) != 0) {
3514 error = EINVAL;
3515 goto out;
3516 }
3517 transaction_begun = 1;
3518
3519 /*
3520 * Prevent new allocations from using the part we're trying to truncate.
3521 *
3522 * NOTE: allocLimit is set to the allocation block number where the new
3523 * alternate volume header will be. That way there will be no files to
3524 * interfere with allocating the new alternate volume header, and no files
3525 * in the allocation blocks beyond (i.e. the blocks we're trying to
3526 * truncate away).
3527 */
3528 lck_mtx_lock(&hfsmp->hfs_mutex);
3529 if (hfsmp->blockSize == 512)
3530 hfsmp->allocLimit = newblkcnt - 2;
3531 else
3532 hfsmp->allocLimit = newblkcnt - 1;
3533 hfsmp->freeBlocks -= reclaimblks;
3534 lck_mtx_unlock(&hfsmp->hfs_mutex);
3535
3536 /*
3537 * Look for files that have blocks at or beyond the location of the
3538 * new alternate volume header.
3539 */
3540 if (hfs_isallocated(hfsmp, hfsmp->allocLimit, reclaimblks)) {
3541 /*
3542 * hfs_reclaimspace will use separate transactions when
3543 * relocating files (so we don't overwhelm the journal).
3544 */
3545 hfs_end_transaction(hfsmp);
3546 transaction_begun = 0;
3547
3548 /* Attempt to reclaim some space. */
3549 if (hfs_reclaimspace(hfsmp, hfsmp->allocLimit, reclaimblks, context) != 0) {
3550 printf("hfs_truncatefs: couldn't reclaim space on %s\n", hfsmp->vcbVN);
3551 error = ENOSPC;
3552 goto out;
3553 }
3554 if (hfs_start_transaction(hfsmp) != 0) {
3555 error = EINVAL;
3556 goto out;
3557 }
3558 transaction_begun = 1;
3559
3560 /* Check if we're clear now. */
3561 if (hfs_isallocated(hfsmp, hfsmp->allocLimit, reclaimblks)) {
3562 printf("hfs_truncatefs: didn't reclaim enough space on %s\n", hfsmp->vcbVN);
3563 error = EAGAIN; /* tell client to try again */
3564 goto out;
3565 }
3566 }
3567
3568 /*
3569 * Note: we take the attributes lock in case we have an attribute data vnode
3570 * which needs to change size.
3571 */
3572 lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3573
3574 /*
3575 * Mark the old alternate volume header as free.
3576 * We don't bother shrinking allocation bitmap file.
3577 */
3578 if (hfsmp->blockSize == 512)
3579 (void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 2, 2);
3580 else
3581 (void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 1, 1);
3582
3583 /*
3584 * Allocate last 1KB for alternate volume header.
3585 */
3586 error = BlockMarkAllocated(hfsmp, hfsmp->allocLimit, (hfsmp->blockSize == 512) ? 2 : 1);
3587 if (error) {
3588 printf("hfs_truncatefs: Error %d allocating new alternate volume header\n", error);
3589 goto out;
3590 }
3591
3592 /*
3593 * Invalidate the existing alternate volume header.
3594 *
3595 * Don't include this in a transaction (don't call journal_modify_block)
3596 * since this block will be outside of the truncated file system!
3597 */
3598 if (hfsmp->hfs_alt_id_sector) {
3599 if (buf_meta_bread(hfsmp->hfs_devvp,
3600 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys),
3601 hfsmp->hfs_physical_block_size, NOCRED, &bp) == 0) {
3602
3603 bzero((void*)((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size)), kMDBSize);
3604 (void) VNOP_BWRITE(bp);
3605 } else if (bp) {
3606 buf_brelse(bp);
3607 }
3608 bp = NULL;
3609 }
3610
3611 /* Log successful shrinking. */
3612 printf("hfs_truncatefs: shrank \"%s\" to %u blocks (was %u blocks)\n",
3613 hfsmp->vcbVN, newblkcnt, hfsmp->totalBlocks);
3614
3615 /*
3616 * Adjust file system variables and flush them to disk.
3617 */
3618 hfsmp->totalBlocks = newblkcnt;
3619 hfsmp->hfs_logical_block_count = newsize / hfsmp->hfs_logical_block_size;
3620 hfsmp->hfs_alt_id_sector = HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, hfsmp->hfs_logical_block_count);
3621 MarkVCBDirty(hfsmp);
3622 error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
3623 if (error)
3624 panic("hfs_truncatefs: unexpected error flushing volume header (%d)\n", error);
3625
3626 /*
3627 * TODO: Adjust the size of the metadata zone based on new volume size?
3628 */
3629
3630 /*
3631 * Adjust the size of hfsmp->hfs_attrdata_vp
3632 */
3633 if (hfsmp->hfs_attrdata_vp) {
3634 struct cnode *cp;
3635 struct filefork *fp;
3636
3637 if (vnode_get(hfsmp->hfs_attrdata_vp) == 0) {
3638 cp = VTOC(hfsmp->hfs_attrdata_vp);
3639 fp = VTOF(hfsmp->hfs_attrdata_vp);
3640
3641 cp->c_blocks = newblkcnt;
3642 fp->ff_blocks = newblkcnt;
3643 fp->ff_extents[0].blockCount = newblkcnt;
3644 fp->ff_size = (off_t) newblkcnt * hfsmp->blockSize;
3645 ubc_setsize(hfsmp->hfs_attrdata_vp, fp->ff_size);
3646 vnode_put(hfsmp->hfs_attrdata_vp);
3647 }
3648 }
3649
3650 out:
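/* On failure, give back the free-block count that was reserved above. */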
3651 if (error)
3652 hfsmp->freeBlocks += reclaimblks;
3653
3654 lck_mtx_lock(&hfsmp->hfs_mutex);
3655 hfsmp->allocLimit = hfsmp->totalBlocks;
3656 if (hfsmp->nextAllocation >= hfsmp->allocLimit)
3657 hfsmp->nextAllocation = hfsmp->hfs_metazone_end + 1;
3658 hfsmp->hfs_flags &= ~HFS_RESIZE_IN_PROGRESS;
3659 lck_mtx_unlock(&hfsmp->hfs_mutex);
3660
3661 if (lockflags) {
3662 hfs_systemfile_unlock(hfsmp, lockflags);
3663 }
3664 if (transaction_begun) {
3665 hfs_end_transaction(hfsmp);
3666 journal_flush(hfsmp->jnl);
3667 }
3668
3669 return (error);
3670 }
3671
3672
3673 /*
3674 * Invalidate the physical block numbers associated with buffer cache blocks
3675 * in the given extent of the given vnode.
3676 */
3677 struct hfs_inval_blk_no {
3678 daddr64_t sectorStart;
3679 daddr64_t sectorCount;
3680 };
3681 static int
3682 hfs_invalidate_block_numbers_callback(buf_t bp, void *args_in)
3683 {
3684 daddr64_t blkno;
3685 struct hfs_inval_blk_no *args;
3686
3687 blkno = buf_blkno(bp);
3688 args = args_in;
3689
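/*
 * Resetting the physical block number to the logical block number marks the
 * buffer as unmapped, so a later write will go through VNOP_BLOCKMAP to find
 * the (possibly new) physical location.
 */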
3690 if (blkno >= args->sectorStart && blkno < args->sectorStart+args->sectorCount)
3691 buf_setblkno(bp, buf_lblkno(bp));
3692
3693 return BUF_RETURNED;
3694 }
3695 static void
3696 hfs_invalidate_sectors(struct vnode *vp, daddr64_t sectorStart, daddr64_t sectorCount)
3697 {
3698 struct hfs_inval_blk_no args;
3699 args.sectorStart = sectorStart;
3700 args.sectorCount = sectorCount;
3701
3702 buf_iterate(vp, hfs_invalidate_block_numbers_callback, BUF_SCAN_DIRTY|BUF_SCAN_CLEAN, &args);
3703 }
3704
3705
3706 /*
3707 * Copy the contents of an extent to a new location. Also invalidates the
3708 * physical block number of any buffer cache block in the copied extent
3709 * (so that if the block is written, it will go through VNOP_BLOCKMAP to
3710 * determine the new physical block number).
3711 */
3712 static int
3713 hfs_copy_extent(
3714 struct hfsmount *hfsmp,
3715 struct vnode *vp, /* The file whose extent is being copied. */
3716 u_int32_t oldStart, /* The start of the source extent. */
3717 u_int32_t newStart, /* The start of the destination extent. */
3718 u_int32_t blockCount, /* The number of allocation blocks to copy. */
3719 vfs_context_t context)
3720 {
3721 int err = 0;
3722 size_t bufferSize;
3723 void *buffer = NULL;
3724 struct vfsioattr ioattr;
3725 buf_t bp = NULL;
3726 off_t resid;
3727 size_t ioSize;
3728 u_int32_t ioSizeSectors; /* Device sectors in this I/O */
3729 daddr64_t srcSector, destSector;
3730 u_int32_t sectorsPerBlock = hfsmp->blockSize / hfsmp->hfs_logical_block_size;
3731
3732 /*
3733 * Sanity check that we have locked the vnode of the file we're copying.
3734 *
3735 * But since hfs_systemfile_lock() doesn't actually take the lock on
3736 * the allocation file if a journal is active, ignore the check if the
3737 * file being copied is the allocation file.
3738 */
3739 struct cnode *cp = VTOC(vp);
3740 if (cp != hfsmp->hfs_allocation_cp && cp->c_lockowner != current_thread())
3741 panic("hfs_copy_extent: vp=%p (cp=%p) not owned?\n", vp, cp);
3742
3743 /*
3744 * Wait for any in-progress writes to this vnode to complete, so that we'll
3745 * be copying consistent bits. (Otherwise, it's possible that an async
3746 * write will complete to the old extent after we read from it. That
3747 * could lead to corruption.)
3748 */
3749 err = vnode_waitforwrites(vp, 0, 0, 0, "hfs_copy_extent");
3750 if (err) {
3751 printf("hfs_copy_extent: Error %d from vnode_waitforwrites\n", err);
3752 return err;
3753 }
3754
3755 /*
3756 * Determine the I/O size to use
3757 *
3758 * NOTE: Many external drives will result in an ioSize of 128KB.
3759 * TODO: Should we use a larger buffer, doing several consecutive
3760 * reads, then several consecutive writes?
3761 */
3762 vfs_ioattr(hfsmp->hfs_mp, &ioattr);
3763 bufferSize = MIN(ioattr.io_maxreadcnt, ioattr.io_maxwritecnt);
3764 if (kmem_alloc(kernel_map, (vm_offset_t*) &buffer, bufferSize))
3765 return ENOMEM;
3766
3767 /* Get a buffer for doing the I/O */
3768 bp = buf_alloc(hfsmp->hfs_devvp);
3769 buf_setdataptr(bp, (uintptr_t)buffer);
3770
3771 resid = (off_t) blockCount * (off_t) hfsmp->blockSize;
3772 srcSector = (daddr64_t) oldStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size;
3773 destSector = (daddr64_t) newStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size;
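/*
 * Copy the extent in bufferSize-sized chunks: each pass reads from the old
 * location and then writes the same bytes to the new location, both through
 * the device vnode. The sector numbers above scale allocation block numbers
 * by (blockSize / hfs_logical_block_size); for example, with 4 KB allocation
 * blocks on a 512-byte-sector device, allocation block N starts at sector N * 8.
 */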
3774 while (resid > 0) {
3775 ioSize = MIN(bufferSize, resid);
3776 ioSizeSectors = ioSize / hfsmp->hfs_logical_block_size;
3777
3778 /* Prepare the buffer for reading */
3779 buf_reset(bp, B_READ);
3780 buf_setsize(bp, ioSize);
3781 buf_setcount(bp, ioSize);
3782 buf_setblkno(bp, srcSector);
3783 buf_setlblkno(bp, srcSector);
3784
3785 /* Do the read */
3786 err = VNOP_STRATEGY(bp);
3787 if (!err)
3788 err = buf_biowait(bp);
3789 if (err) {
3790 printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (read)\n", err);
3791 break;
3792 }
3793
3794 /* Prepare the buffer for writing */
3795 buf_reset(bp, B_WRITE);
3796 buf_setsize(bp, ioSize);
3797 buf_setcount(bp, ioSize);
3798 buf_setblkno(bp, destSector);
3799 buf_setlblkno(bp, destSector);
3800 if (journal_uses_fua(hfsmp->jnl))
3801 buf_markfua(bp);
3802
3803 /* Do the write */
3804 vnode_startwrite(hfsmp->hfs_devvp);
3805 err = VNOP_STRATEGY(bp);
3806 if (!err)
3807 err = buf_biowait(bp);
3808 if (err) {
3809 printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (write)\n", err);
3810 break;
3811 }
3812
3813 resid -= ioSize;
3814 srcSector += ioSizeSectors;
3815 destSector += ioSizeSectors;
3816 }
3817 if (bp)
3818 buf_free(bp);
3819 if (buffer)
3820 kmem_free(kernel_map, (vm_offset_t)buffer, bufferSize);
3821
3822 /* Make sure all writes have been flushed to disk. */
3823 if (!journal_uses_fua(hfsmp->jnl)) {
3824 err = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
3825 if (err) {
3826 printf("hfs_copy_extent: DKIOCSYNCHRONIZECACHE failed (%d)\n", err);
3827 err = 0; /* Don't fail the copy. */
3828 }
3829 }
3830
3831 if (!err)
3832 hfs_invalidate_sectors(vp, (daddr64_t)oldStart*sectorsPerBlock, (daddr64_t)blockCount*sectorsPerBlock);
3833
3834 return err;
3835 }
3836
3837
3838 /*
3839 * Reclaim space at the end of a volume, used by a given system file.
3840 *
3841 * This routine attempts to move any extent which contains allocation blocks
3842 * at or after "startblk." A separate transaction is used to do the move.
3843 * The contents of any moved extents are read and written via the volume's
3844 * device vnode -- NOT via "vp." During the move, moved blocks which are part
3845 * of a transaction have their physical block numbers invalidated so they will
3846 * eventually be written to their new locations.
3847 *
3848 * This routine can be used to move overflow extents for the allocation file.
3849 *
3850 * Inputs:
3851 * hfsmp The volume being resized.
3852 * startblk Blocks >= this allocation block need to be moved.
3853 * locks Which locks need to be taken for the given system file.
3854 * vp The vnode for the system file.
3855 *
3856 * Outputs:
3857 * moved Set to true if any extents were moved.
3858 */
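/*
 * B-tree update callback: overwrites an extent record in place with the
 * caller-supplied copy (used below with BTUpdateRecord when relocating
 * overflow extents).
 */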
3859 static int
3860 hfs_relocate_callback(__unused HFSPlusExtentKey *key, HFSPlusExtentRecord *record, HFSPlusExtentRecord *state)
3861 {
3862 bcopy(state, record, sizeof(HFSPlusExtentRecord));
3863 return 0;
3864 }
3865 static int
3866 hfs_reclaim_sys_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int locks, Boolean *moved, vfs_context_t context)
3867 {
3868 int error;
3869 int lockflags;
3870 int i;
3871 u_long datablks;
3872 u_long block;
3873 u_int32_t oldStartBlock;
3874 u_int32_t newStartBlock;
3875 u_int32_t blockCount;
3876 struct filefork *fp;
3877
3878 /* If there is no vnode for this file, then there's nothing to do. */
3879 if (vp == NULL)
3880 return 0;
3881
3882 /* printf("hfs_reclaim_sys_file: %.*s\n", VTOC(vp)->c_desc.cd_namelen, VTOC(vp)->c_desc.cd_nameptr); */
3883
3884 /* We always need the allocation bitmap and extents B-tree */
3885 locks |= SFL_BITMAP | SFL_EXTENTS;
3886
3887 error = hfs_start_transaction(hfsmp);
3888 if (error) {
3889 printf("hfs_reclaim_sys_file: hfs_start_transaction returned %d\n", error);
3890 return error;
3891 }
3892 lockflags = hfs_systemfile_lock(hfsmp, locks, HFS_EXCLUSIVE_LOCK);
3893 fp = VTOF(vp);
3894 datablks = 0;
3895
3896 /* Relocate non-overflow extents */
3897 for (i = 0; i < kHFSPlusExtentDensity; ++i) {
3898 if (fp->ff_extents[i].blockCount == 0)
3899 break;
3900 oldStartBlock = fp->ff_extents[i].startBlock;
3901 blockCount = fp->ff_extents[i].blockCount;
3902 datablks += blockCount;
3903 block = oldStartBlock + blockCount;
3904 if (block > startblk) {
3905 error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
3906 if (error) {
3907 printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
3908 goto fail;
3909 }
3910 if (blockCount != fp->ff_extents[i].blockCount) {
3911 printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u\n", blockCount, fp->ff_extents[i].blockCount);
3912 goto free_fail;
3913 }
3914 error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
3915 if (error) {
3916 printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
3917 goto free_fail;
3918 }
3919 fp->ff_extents[i].startBlock = newStartBlock;
3920 VTOC(vp)->c_flag |= C_MODIFIED;
3921 *moved = true;
3922 error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
3923 if (error) {
3924 /* TODO: Mark volume inconsistent? */
3925 printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
3926 goto fail;
3927 }
3928 error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
3929 if (error) {
3930 /* TODO: Mark volume inconsistent? */
3931 printf("hfs_reclaim_sys_file: hfs_flushvolumeheader returned %d\n", error);
3932 goto fail;
3933 }
3934 }
3935 }
3936
3937 /* Relocate overflow extents (if any) */
3938 if (i == kHFSPlusExtentDensity && fp->ff_blocks > datablks) {
3939 struct BTreeIterator *iterator = NULL;
3940 struct FSBufferDescriptor btdata;
3941 HFSPlusExtentRecord record;
3942 HFSPlusExtentKey *key;
3943 FCB *fcb;
3944 u_int32_t fileID;
3945 u_int8_t forktype;
3946
3947 forktype = VNODE_IS_RSRC(vp) ? 0xFF : 0;
3948 fileID = VTOC(vp)->c_cnid;
3949 if (kmem_alloc(kernel_map, (vm_offset_t*) &iterator, sizeof(*iterator))) {
3950 printf("hfs_reclaim_sys_file: kmem_alloc failed!\n");
3951 error = ENOMEM;
3952 goto fail;
3953 }
3954
3955 bzero(iterator, sizeof(*iterator));
3956 key = (HFSPlusExtentKey *) &iterator->key;
3957 key->keyLength = kHFSPlusExtentKeyMaximumLength;
3958 key->forkType = forktype;
3959 key->fileID = fileID;
3960 key->startBlock = datablks;
3961
3962 btdata.bufferAddress = &record;
3963 btdata.itemSize = sizeof(record);
3964 btdata.itemCount = 1;
3965
3966 fcb = VTOF(hfsmp->hfs_extents_vp);
3967
3968 error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
3969 while (error == 0) {
3970 /* Stop when we encounter a different file or fork. */
3971 if ((key->fileID != fileID) ||
3972 (key->forkType != forktype)) {
3973 break;
3974 }
3975 /*
3976 * Check if the file overlaps target space.
3977 */
3978 for (i = 0; i < kHFSPlusExtentDensity; ++i) {
3979 if (record[i].blockCount == 0) {
3980 goto overflow_done;
3981 }
3982 oldStartBlock = record[i].startBlock;
3983 blockCount = record[i].blockCount;
3984 block = oldStartBlock + blockCount;
3985 if (block > startblk) {
3986 error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
3987 if (error) {
3988 printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
3989 goto overflow_done;
3990 }
3991 if (blockCount != record[i].blockCount) {
3992 printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u\n", blockCount, record[i].blockCount);
3993 kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
3994 goto free_fail;
3995 }
3996 error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
3997 if (error) {
3998 printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
3999 kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4000 goto free_fail;
4001 }
4002 record[i].startBlock = newStartBlock;
4003 VTOC(vp)->c_flag |= C_MODIFIED;
4004 *moved = true;
4005 /*
4006 * NOTE: To support relocating overflow extents of the
4007 * allocation file, we must update the BTree record BEFORE
4008 * deallocating the old extent so that BlockDeallocate will
4009 * use the extent's new location to calculate physical block
4010 * numbers. (This is for the case where the old extent's
4011 * bitmap bits actually reside in the extent being moved.)
4012 */
4013 error = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr) hfs_relocate_callback, &record);
4014 if (error) {
4015 /* TODO: Mark volume inconsistent? */
4016 printf("hfs_reclaim_sys_file: BTUpdateRecord returned %d\n", error);
4017 goto overflow_done;
4018 }
4019 error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
4020 if (error) {
4021 /* TODO: Mark volume inconsistent? */
4022 printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
4023 goto overflow_done;
4024 }
4025 }
4026 }
4027 /* Look for more records. */
4028 error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
4029 if (error == btNotFound) {
4030 error = 0;
4031 break;
4032 }
4033 }
4034 overflow_done:
4035 kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4036 if (error) {
4037 goto fail;
4038 }
4039 }
4040
4041 hfs_systemfile_unlock(hfsmp, lockflags);
4042 error = hfs_end_transaction(hfsmp);
4043 if (error) {
4044 printf("hfs_reclaim_sys_file: hfs_end_transaction returned %d\n", error);
4045 }
4046
4047 return error;
4048
4049 free_fail:
4050 (void) BlockDeallocate(hfsmp, newStartBlock, blockCount);
4051 fail:
4052 (void) hfs_systemfile_unlock(hfsmp, lockflags);
4053 (void) hfs_end_transaction(hfsmp);
4054 return error;
4055 }
4056
4057
4058 /*
4059 * This journal_relocate callback updates the journal info block to point
4060 * at the new journal location. This write must NOT be done using the
4061 * transaction. We must write the block immediately. We must also force
4062 * it to get to the media so that the new journal location will be seen by
4063 * the replay code before we can safely let journaled blocks be written
4064 * to their normal locations.
4065 *
4066 * The tests for journal_uses_fua below are mildly hacky. Since the journal
4067 * and the file system are both on the same device, I'm leveraging what
4068 * the journal has decided about FUA.
4069 */
4070 struct hfs_journal_relocate_args {
4071 struct hfsmount *hfsmp;
4072 vfs_context_t context;
4073 u_int32_t newStartBlock;
4074 };
4075
4076 static errno_t
4077 hfs_journal_relocate_callback(void *_args)
4078 {
4079 int error;
4080 struct hfs_journal_relocate_args *args = _args;
4081 struct hfsmount *hfsmp = args->hfsmp;
4082 buf_t bp;
4083 JournalInfoBlock *jibp;
4084
4085 error = buf_meta_bread(hfsmp->hfs_devvp,
4086 hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
4087 hfsmp->blockSize, vfs_context_ucred(args->context), &bp);
4088 if (error) {
4089 printf("hfs_reclaim_journal_file: failed to read JIB (%d)\n", error);
4090 return error;
4091 }
4092 jibp = (JournalInfoBlock*) buf_dataptr(bp);
4093 jibp->offset = SWAP_BE64((u_int64_t)args->newStartBlock * hfsmp->blockSize);
4094 jibp->size = SWAP_BE64(hfsmp->jnl_size);
4095 if (journal_uses_fua(hfsmp->jnl))
4096 buf_markfua(bp);
4097 error = buf_bwrite(bp);
4098 if (error) {
4099 printf("hfs_reclaim_journal_file: failed to write JIB (%d)\n", error);
4100 return error;
4101 }
4102 if (!journal_uses_fua(hfsmp->jnl)) {
4103 error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, args->context);
4104 if (error) {
4105 printf("hfs_reclaim_journal_file: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
4106 error = 0; /* Don't fail the operation. */
4107 }
4108 }
4109
4110 return error;
4111 }
4112
4113
4114 static int
4115 hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context)
4116 {
4117 int error;
4118 int lockflags;
4119 u_int32_t newStartBlock;
4120 u_int32_t oldBlockCount;
4121 u_int32_t newBlockCount;
4122 struct cat_desc journal_desc;
4123 struct cat_attr journal_attr;
4124 struct cat_fork journal_fork;
4125 struct hfs_journal_relocate_args callback_args;
4126
4127 error = hfs_start_transaction(hfsmp);
4128 if (error) {
4129 printf("hfs_reclaim_journal_file: hfs_start_transaction returned %d\n", error);
4130 return error;
4131 }
4132 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
4133
4134 oldBlockCount = hfsmp->jnl_size / hfsmp->blockSize;
4135
4136 /* TODO: Allow the journal to change size based on the new volume size. */
4137 error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, true, true, &newStartBlock, &newBlockCount);
4138 if (error) {
4139 printf("hfs_reclaim_journal_file: BlockAllocate returned %d\n", error);
4140 goto fail;
4141 }
4142 if (newBlockCount != oldBlockCount) {
4143 printf("hfs_reclaim_journal_file: newBlockCount != oldBlockCount (%u, %u)\n", newBlockCount, oldBlockCount);
4144 goto free_fail;
4145 }
4146
4147 error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount);
4148 if (error) {
4149 printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error);
4150 goto free_fail;
4151 }
4152
4153 /* Update the catalog record for .journal */
4154 error = cat_idlookup(hfsmp, hfsmp->hfs_jnlfileid, 1, &journal_desc, &journal_attr, &journal_fork);
4155 if (error) {
4156 printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error);
4157 goto free_fail;
4158 }
4159 journal_fork.cf_size = newBlockCount * hfsmp->blockSize;
4160 journal_fork.cf_extents[0].startBlock = newStartBlock;
4161 journal_fork.cf_extents[0].blockCount = newBlockCount;
4162 journal_fork.cf_blocks = newBlockCount;
4163 error = cat_update(hfsmp, &journal_desc, &journal_attr, &journal_fork, NULL);
4164 cat_releasedesc(&journal_desc); /* all done with cat descriptor */
4165 if (error) {
4166 printf("hfs_reclaim_journal_file: cat_update returned %d\n", error);
4167 goto free_fail;
4168 }
4169 callback_args.hfsmp = hfsmp;
4170 callback_args.context = context;
4171 callback_args.newStartBlock = newStartBlock;
4172
4173 error = journal_relocate(hfsmp->jnl, (off_t)newStartBlock*hfsmp->blockSize,
4174 (off_t)newBlockCount*hfsmp->blockSize, 0,
4175 hfs_journal_relocate_callback, &callback_args);
4176 if (error) {
4177 /* NOTE: journal_relocate will mark the journal invalid. */
4178 printf("hfs_reclaim_journal_file: journal_relocate returned %d\n", error);
4179 goto fail;
4180 }
4181 hfsmp->jnl_start = newStartBlock;
4182 hfsmp->jnl_size = (off_t)newBlockCount * hfsmp->blockSize;
4183
4184 hfs_systemfile_unlock(hfsmp, lockflags);
4185 error = hfs_end_transaction(hfsmp);
4186 if (error) {
4187 printf("hfs_reclaim_journal_file: hfs_end_transaction returned %d\n", error);
4188 }
4189
4190 return error;
4191
4192 free_fail:
4193 (void) BlockDeallocate(hfsmp, newStartBlock, newBlockCount);
4194 fail:
4195 hfs_systemfile_unlock(hfsmp, lockflags);
4196 (void) hfs_end_transaction(hfsmp);
4197 return error;
4198 }
4199
4200
4201 /*
4202 * Move the journal info block to a new location. We have to make sure the
4203 * new copy of the journal info block gets to the media first, then change
4204 * the field in the volume header and the catalog record.
4205 */
4206 static int
4207 hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context)
4208 {
4209 int error;
4210 int lockflags;
4211 u_int32_t newBlock;
4212 u_int32_t blockCount;
4213 struct cat_desc jib_desc;
4214 struct cat_attr jib_attr;
4215 struct cat_fork jib_fork;
4216 buf_t old_bp, new_bp;
4217
4218 error = hfs_start_transaction(hfsmp);
4219 if (error) {
4220 printf("hfs_reclaim_journal_info_block: hfs_start_transaction returned %d\n", error);
4221 return error;
4222 }
4223 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
4224
4225 error = BlockAllocate(hfsmp, 1, 1, 1, true, true, &newBlock, &blockCount);
4226 if (error) {
4227 printf("hfs_reclaim_journal_info_block: BlockAllocate returned %d\n", error);
4228 goto fail;
4229 }
4230 if (blockCount != 1) {
4231 printf("hfs_reclaim_journal_info_block: blockCount != 1 (%u)\n", blockCount);
4232 goto free_fail;
4233 }
4234 error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1);
4235 if (error) {
4236 printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error);
4237 goto free_fail;
4238 }
4239
4240 /* Copy the old journal info block content to the new location */
4241 error = buf_meta_bread(hfsmp->hfs_devvp,
4242 hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
4243 hfsmp->blockSize, vfs_context_ucred(context), &old_bp);
4244 if (error) {
4245 printf("hfs_reclaim_journal_info_block: failed to read JIB (%d)\n", error);
4246 goto free_fail;
4247 }
4248 new_bp = buf_getblk(hfsmp->hfs_devvp,
4249 newBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
4250 hfsmp->blockSize, 0, 0, BLK_META);
4251 bcopy((char*)buf_dataptr(old_bp), (char*)buf_dataptr(new_bp), hfsmp->blockSize);
4252 buf_brelse(old_bp);
4253 if (journal_uses_fua(hfsmp->jnl))
4254 buf_markfua(new_bp);
4255 error = buf_bwrite(new_bp);
4256 if (error) {
4257 printf("hfs_reclaim_journal_info_block: failed to write new JIB (%d)\n", error);
4258 goto free_fail;
4259 }
4260 if (!journal_uses_fua(hfsmp->jnl)) {
4261 error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
4262 if (error) {
4263 printf("hfs_reclaim_journal_info_block: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
4264 /* Don't fail the operation. */
4265 }
4266 }
4267
4268 /* Update the catalog record for .journal_info_block */
4269 error = cat_idlookup(hfsmp, hfsmp->hfs_jnlinfoblkid, 1, &jib_desc, &jib_attr, &jib_fork);
4270 if (error) {
4271 printf("hfs_reclaim_journal_info_block: cat_idlookup returned %d\n", error);
4272 goto fail;
4273 }
4274 jib_fork.cf_size = hfsmp->blockSize;
4275 jib_fork.cf_extents[0].startBlock = newBlock;
4276 jib_fork.cf_extents[0].blockCount = 1;
4277 jib_fork.cf_blocks = 1;
4278 error = cat_update(hfsmp, &jib_desc, &jib_attr, &jib_fork, NULL);
4279 cat_releasedesc(&jib_desc); /* all done with cat descriptor */
4280 if (error) {
4281 printf("hfs_reclaim_journal_info_block: cat_update returned %d\n", error);
4282 goto fail;
4283 }
4284
4285 /* Update the pointer to the journal info block in the volume header. */
4286 hfsmp->vcbJinfoBlock = newBlock;
4287 error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
4288 if (error) {
4289 printf("hfs_reclaim_journal_info_block: hfs_flushvolumeheader returned %d\n", error);
4290 goto fail;
4291 }
4292 hfs_systemfile_unlock(hfsmp, lockflags);
4293 error = hfs_end_transaction(hfsmp);
4294 if (error) {
4295 printf("hfs_reclaim_journal_info_block: hfs_end_transaction returned %d\n", error);
4296 }
4297 error = journal_flush(hfsmp->jnl);
4298 if (error) {
4299 printf("hfs_reclaim_journal_info_block: journal_flush returned %d\n", error);
4300 }
4301 return error;
4302
4303 free_fail:
4304 (void) BlockDeallocate(hfsmp, newBlock, blockCount);
4305 fail:
4306 hfs_systemfile_unlock(hfsmp, lockflags);
4307 (void) hfs_end_transaction(hfsmp);
4308 return error;
4309 }
4310
4311
4312 /*
4313 * Reclaim space at the end of a file system.
4314 */
4315 static int
4316 hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vfs_context_t context)
4317 {
4318 struct vnode *vp = NULL;
4319 FCB *fcb;
4320 struct BTreeIterator * iterator = NULL;
4321 struct FSBufferDescriptor btdata;
4322 struct HFSPlusCatalogFile filerec;
4323 u_int32_t saved_next_allocation;
4324 cnid_t * cnidbufp;
4325 size_t cnidbufsize;
4326 int filecnt = 0;
4327 int maxfilecnt;
4328 u_long block;
4329 u_long datablks;
4330 u_long rsrcblks;
4331 u_long blkstomove = 0;
4332 int lockflags;
4333 int i;
4334 int error;
4335 int lastprogress = 0;
4336 Boolean system_file_moved = false;
4337
4338 /* Relocate extents of the Allocation file if they're in the way. */
4339 error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_allocation_vp, startblk, SFL_BITMAP, &system_file_moved, context);
4340 if (error) {
4341 printf("hfs_reclaimspace: reclaim allocation file returned %d\n", error);
4342 return error;
4343 }
4344 /* Relocate extents of the Extents B-tree if they're in the way. */
4345 error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_extents_vp, startblk, SFL_EXTENTS, &system_file_moved, context);
4346 if (error) {
4347 printf("hfs_reclaimspace: reclaim extents b-tree returned %d\n", error);
4348 return error;
4349 }
4350 /* Relocate extents of the Catalog B-tree if they're in the way. */
4351 error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_catalog_vp, startblk, SFL_CATALOG, &system_file_moved, context);
4352 if (error) {
4353 printf("hfs_reclaimspace: reclaim catalog b-tree returned %d\n", error);
4354 return error;
4355 }
4356 /* Relocate extents of the Attributes B-tree if they're in the way. */
4357 error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_attribute_vp, startblk, SFL_ATTRIBUTE, &system_file_moved, context);
4358 if (error) {
4359 printf("hfs_reclaimspace: reclaim attribute b-tree returned %d\n", error);
4360 return error;
4361 }
4362 /* Relocate extents of the Startup File if there is one and they're in the way. */
4363 error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_startup_vp, startblk, SFL_STARTUP, &system_file_moved, context);
4364 if (error) {
4365 printf("hfs_reclaimspace: reclaim startup file returned %d\n", error);
4366 return error;
4367 }
4368
4369 /*
4370 * We need to make sure the alternate volume header gets flushed if we moved
4371 * any extents recorded in the volume header. But we need to do that before
4372 * shrinking the size of the volume, or else the journal code will panic
4373 * with an invalid (too large) block number.
4374 *
4375 * Note that system_file_moved will be set if ANY extent was moved, even
4376 * if it was just an overflow extent. In this case, the journal_flush isn't
4377 * strictly required, but shouldn't hurt.
4378 */
4379 if (system_file_moved)
4380 journal_flush(hfsmp->jnl);
4381
4382 if (hfsmp->jnl_start + (hfsmp->jnl_size / hfsmp->blockSize) > startblk) {
4383 error = hfs_reclaim_journal_file(hfsmp, context);
4384 if (error) {
4385 printf("hfs_reclaimspace: hfs_reclaim_journal_file failed (%d)\n", error);
4386 return error;
4387 }
4388 }
4389
4390 if (hfsmp->vcbJinfoBlock >= startblk) {
4391 error = hfs_reclaim_journal_info_block(hfsmp, context);
4392 if (error) {
4393 printf("hfs_reclaimspace: hfs_reclaim_journal_info_block failed (%d)\n", error);
4394 return error;
4395 }
4396 }
4397
4398 /* For now move a maximum of 250,000 files. */
4399 maxfilecnt = MIN(hfsmp->hfs_filecount, 250000);
4400 maxfilecnt = MIN((u_long)maxfilecnt, reclaimblks);
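/* reclaimblks also bounds the count, since each file that must move occupies at least one block in the reclaim region. */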
4401 cnidbufsize = maxfilecnt * sizeof(cnid_t);
4402 if (kmem_alloc(kernel_map, (vm_offset_t *)&cnidbufp, cnidbufsize)) {
4403 return (ENOMEM);
4404 }
4405 if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) {
4406 kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize);
4407 return (ENOMEM);
4408 }
4409
4410 saved_next_allocation = hfsmp->nextAllocation;
4411 HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_start);
4412
4413 fcb = VTOF(hfsmp->hfs_catalog_vp);
4414 bzero(iterator, sizeof(*iterator));
4415
4416 btdata.bufferAddress = &filerec;
4417 btdata.itemSize = sizeof(filerec);
4418 btdata.itemCount = 1;
4419
4420 /* Keep the Catalog and extents files locked during iteration. */
4421 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_SHARED_LOCK);
4422
4423 error = BTIterateRecord(fcb, kBTreeFirstRecord, iterator, NULL, NULL);
4424 if (error) {
4425 goto end_iteration;
4426 }
4427 /*
4428 * Iterate over all the catalog records looking for files
4429 * that overlap into the space we're trying to free up.
4430 */
4431 for (filecnt = 0; filecnt < maxfilecnt; ) {
4432 error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
4433 if (error) {
4434 if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) {
4435 error = 0;
4436 }
4437 break;
4438 }
4439 if (filerec.recordType != kHFSPlusFileRecord) {
4440 continue;
4441 }
4442 datablks = rsrcblks = 0;
4443 /*
4444 * Check if either fork overlaps target space.
4445 */
4446 for (i = 0; i < kHFSPlusExtentDensity; ++i) {
4447 if (filerec.dataFork.extents[i].blockCount != 0) {
4448 datablks += filerec.dataFork.extents[i].blockCount;
4449 block = filerec.dataFork.extents[i].startBlock +
4450 filerec.dataFork.extents[i].blockCount;
4451 if (block >= startblk) {
4452 if ((filerec.fileID == hfsmp->hfs_jnlfileid) ||
4453 (filerec.fileID == hfsmp->hfs_jnlinfoblkid)) {
4454 printf("hfs_reclaimspace: cannot move active journal\n");
4455 error = EPERM;
4456 goto end_iteration;
4457 }
4458 cnidbufp[filecnt++] = filerec.fileID;
4459 blkstomove += filerec.dataFork.totalBlocks;
4460 break;
4461 }
4462 }
4463 if (filerec.resourceFork.extents[i].blockCount != 0) {
4464 rsrcblks += filerec.resourceFork.extents[i].blockCount;
4465 block = filerec.resourceFork.extents[i].startBlock +
4466 filerec.resourceFork.extents[i].blockCount;
4467 if (block >= startblk) {
4468 cnidbufp[filecnt++] = filerec.fileID;
4469 blkstomove += filerec.resourceFork.totalBlocks;
4470 break;
4471 }
4472 }
4473 }
4474 /*
4475 * Check for any overflow extents that overlap.
4476 */
4477 if (i == kHFSPlusExtentDensity) {
4478 if (filerec.dataFork.totalBlocks > datablks) {
4479 if (hfs_overlapped_overflow_extents(hfsmp, startblk, datablks, filerec.fileID, 0)) {
4480 cnidbufp[filecnt++] = filerec.fileID;
4481 blkstomove += filerec.dataFork.totalBlocks;
4482 }
4483 } else if (filerec.resourceFork.totalBlocks > rsrcblks) {
4484 if (hfs_overlapped_overflow_extents(hfsmp, startblk, rsrcblks, filerec.fileID, 1)) {
4485 cnidbufp[filecnt++] = filerec.fileID;
4486 blkstomove += filerec.resourceFork.totalBlocks;
4487 }
4488 }
4489 }
4490 }
4491
4492 end_iteration:
4493 if (filecnt == 0 && !system_file_moved) {
4494 printf("hfs_reclaimspace: no files moved\n");
4495 error = ENOSPC;
4496 }
4497 /* All done with catalog. */
4498 hfs_systemfile_unlock(hfsmp, lockflags);
4499 if (error || filecnt == 0)
4500 goto out;
4501
4502 /*
4503 * Double check space requirements to make sure
4504 * there is enough space to relocate any files
4505 * that reside in the reclaim area.
4506 *
4507 * Blocks To Move --------------
4508 * | | |
4509 * V V V
4510 * ------------------------------------------------------------------------
4511 * | | / /// // |
4512 * | | / /// // |
4513 * | | / /// // |
4514 * ------------------------------------------------------------------------
4515 *
4516 * <------------------- New Total Blocks ------------------><-- Reclaim -->
4517 *
4518 * <------------------------ Original Total Blocks ----------------------->
4519 *
4520 */
4521 if (blkstomove >= hfs_freeblks(hfsmp, 1)) {
4522 printf("hfs_reclaimspace: insufficient space (need %lu blocks; have %u blocks)\n", blkstomove, hfs_freeblks(hfsmp, 1));
4523 error = ENOSPC;
4524 goto out;
4525 }
4526 hfsmp->hfs_resize_filesmoved = 0;
4527 hfsmp->hfs_resize_totalfiles = filecnt;
4528
4529 /* Now move any files that are in the way. */
4530 for (i = 0; i < filecnt; ++i) {
4531 struct vnode * rvp;
4532 struct cnode * cp;
4533
4534 if (hfs_vget(hfsmp, cnidbufp[i], &vp, 0) != 0)
4535 continue;
4536
4537 /* Relocating directory hard links is not supported, so we
4538 * punt (see radar 6217026). */
4539 cp = VTOC(vp);
4540 if ((cp->c_flag & C_HARDLINK) && vnode_isdir(vp)) {
4541 printf("hfs_reclaimspace: unable to relocate directory hard link %d\n", cp->c_cnid);
4542 error = EINVAL;
4543 goto out;
4544 }
4545
4546 /* Relocate any data fork blocks. */
4547 if (VTOF(vp) && VTOF(vp)->ff_blocks > 0) {
4548 error = hfs_relocate(vp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc());
4549 }
4550 if (error)
4551 break;
4552
4553 /* Relocate any resource fork blocks. */
4554 if ((cp->c_blocks - (VTOF(vp) ? VTOF((vp))->ff_blocks : 0)) > 0) {
4555 error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE);
4556 if (error)
4557 break;
4558 error = hfs_relocate(rvp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc());
4559 VTOC(rvp)->c_flag |= C_NEED_RVNODE_PUT;
4560 if (error)
4561 break;
4562 }
4563 hfs_unlock(cp);
4564 vnode_put(vp);
4565 vp = NULL;
4566
4567 ++hfsmp->hfs_resize_filesmoved;
4568
4569 /* Report intermediate progress. */
4570 if (filecnt > 100) {
4571 int progress;
4572
4573 progress = (i * 100) / filecnt;
4574 if (progress > (lastprogress + 9)) {
4575 printf("hfs_reclaimspace: %d%% done...\n", progress);
4576 lastprogress = progress;
4577 }
4578 }
4579 }
4580 if (vp) {
4581 hfs_unlock(VTOC(vp));
4582 vnode_put(vp);
4583 vp = NULL;
4584 }
4585 if (hfsmp->hfs_resize_filesmoved != 0) {
4586 printf("hfs_reclaimspace: relocated %d files on \"%s\"\n",
4587 (int)hfsmp->hfs_resize_filesmoved, hfsmp->vcbVN);
4588 }
4589 out:
4590 kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4591 kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize);
4592
4593 /*
4594 * Restore the roving allocation pointer on errors.
4595 * (but only if we didn't move any files)
4596 */
4597 if (error && hfsmp->hfs_resize_filesmoved == 0) {
4598 HFS_UPDATE_NEXT_ALLOCATION(hfsmp, saved_next_allocation);
4599 }
4600 return (error);
4601 }
4602
4603
4604 /*
4605 * Check if there are any overflow extents that overlap.
4606 */
4607 static int
4608 hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk, u_int32_t catblks, u_int32_t fileID, int rsrcfork)
4609 {
4610 struct BTreeIterator * iterator = NULL;
4611 struct FSBufferDescriptor btdata;
4612 HFSPlusExtentRecord extrec;
4613 HFSPlusExtentKey *extkeyptr;
4614 FCB *fcb;
4615 u_int32_t block;
4616 u_int8_t forktype;
4617 int overlapped = 0;
4618 int i;
4619 int error;
4620
4621 forktype = rsrcfork ? 0xFF : 0;
4622 if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) {
4623 return (0);
4624 }
4625 bzero(iterator, sizeof(*iterator));
4626 extkeyptr = (HFSPlusExtentKey *)&iterator->key;
4627 extkeyptr->keyLength = kHFSPlusExtentKeyMaximumLength;
4628 extkeyptr->forkType = forktype;
4629 extkeyptr->fileID = fileID;
4630 extkeyptr->startBlock = catblks;
4631
4632 btdata.bufferAddress = &extrec;
4633 btdata.itemSize = sizeof(extrec);
4634 btdata.itemCount = 1;
4635
4636 fcb = VTOF(hfsmp->hfs_extents_vp);
4637
4638 error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
4639 while (error == 0) {
4640 /* Stop when we encounter a different file or fork. */
4641 if ((extkeyptr->fileID != fileID) ||
4642 (extkeyptr->forkType != forktype)) {
4643 break;
4644 }
4645 /*
4646 * Check if the file overlaps target space.
4647 */
4648 for (i = 0; i < kHFSPlusExtentDensity; ++i) {
4649 if (extrec[i].blockCount == 0) {
4650 break;
4651 }
4652 block = extrec[i].startBlock + extrec[i].blockCount;
4653 if (block >= startblk) {
4654 overlapped = 1;
4655 break;
4656 }
4657 }
4658 /* Look for more records. */
4659 error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
4660 }
4661
4662 kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4663 return (overlapped);
4664 }
4665
4666
4667 /*
4668 * Calculate the progress of a file system resize operation.
4669 */
4670 __private_extern__
4671 int
4672 hfs_resize_progress(struct hfsmount *hfsmp, u_int32_t *progress)
4673 {
4674 if ((hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) == 0) {
4675 return (ENXIO);
4676 }
4677
4678 if (hfsmp->hfs_resize_totalfiles > 0)
4679 *progress = (hfsmp->hfs_resize_filesmoved * 100) / hfsmp->hfs_resize_totalfiles;
4680 else
4681 *progress = 0;
4682
4683 return (0);
4684 }
4685
4686
4687 /*
4688 * Get file system attributes.
4689 */
4690 static int
4691 hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
4692 {
4693 #define HFS_ATTR_CMN_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST))
4694 #define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
4695
4696 ExtendedVCB *vcb = VFSTOVCB(mp);
4697 struct hfsmount *hfsmp = VFSTOHFS(mp);
4698 u_long freeCNIDs;
4699
4700 freeCNIDs = (u_long)0xFFFFFFFF - (u_long)hfsmp->vcbNxtCNID;
4701
4702 VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
4703 VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
4704 VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
4705 VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
4706 VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
4707 VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
4708 VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
4709 VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
4710 VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
4711 /* XXX needs clarification */
4712 VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
4713 /* Maximum files is constrained by total blocks. */
4714 VFSATTR_RETURN(fsap, f_files, (u_int64_t)(hfsmp->totalBlocks - 2));
4715 VFSATTR_RETURN(fsap, f_ffree, MIN((u_int64_t)freeCNIDs, (u_int64_t)hfs_freeblks(hfsmp, 1)));
4716
4717 fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
4718 fsap->f_fsid.val[1] = vfs_typenum(mp);
4719 VFSATTR_SET_SUPPORTED(fsap, f_fsid);
4720
4721 VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
4722 VFSATTR_RETURN(fsap, f_carbon_fsid, 0);
4723
4724 if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
4725 vol_capabilities_attr_t *cap;
4726
4727 cap = &fsap->f_capabilities;
4728
4729 if (hfsmp->hfs_flags & HFS_STANDARD) {
4730 cap->capabilities[VOL_CAPABILITIES_FORMAT] =
4731 VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4732 VOL_CAP_FMT_CASE_PRESERVING |
4733 VOL_CAP_FMT_FAST_STATFS |
4734 VOL_CAP_FMT_HIDDEN_FILES |
4735 VOL_CAP_FMT_PATH_FROM_ID;
4736 } else {
4737 cap->capabilities[VOL_CAPABILITIES_FORMAT] =
4738 VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4739 VOL_CAP_FMT_SYMBOLICLINKS |
4740 VOL_CAP_FMT_HARDLINKS |
4741 VOL_CAP_FMT_JOURNAL |
4742 VOL_CAP_FMT_ZERO_RUNS |
4743 (hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
4744 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
4745 VOL_CAP_FMT_CASE_PRESERVING |
4746 VOL_CAP_FMT_FAST_STATFS |
4747 VOL_CAP_FMT_2TB_FILESIZE |
4748 VOL_CAP_FMT_HIDDEN_FILES |
4749 VOL_CAP_FMT_PATH_FROM_ID;
4750 }
4751 cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
4752 VOL_CAP_INT_SEARCHFS |
4753 VOL_CAP_INT_ATTRLIST |
4754 VOL_CAP_INT_NFSEXPORT |
4755 VOL_CAP_INT_READDIRATTR |
4756 VOL_CAP_INT_EXCHANGEDATA |
4757 VOL_CAP_INT_ALLOCATE |
4758 VOL_CAP_INT_VOL_RENAME |
4759 VOL_CAP_INT_ADVLOCK |
4760 VOL_CAP_INT_FLOCK |
4761 #if NAMEDSTREAMS
4762 VOL_CAP_INT_EXTENDED_ATTR |
4763 VOL_CAP_INT_NAMEDSTREAMS;
4764 #else
4765 VOL_CAP_INT_EXTENDED_ATTR;
4766 #endif
4767 cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
4768 cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
4769
4770 cap->valid[VOL_CAPABILITIES_FORMAT] =
4771 VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4772 VOL_CAP_FMT_SYMBOLICLINKS |
4773 VOL_CAP_FMT_HARDLINKS |
4774 VOL_CAP_FMT_JOURNAL |
4775 VOL_CAP_FMT_JOURNAL_ACTIVE |
4776 VOL_CAP_FMT_NO_ROOT_TIMES |
4777 VOL_CAP_FMT_SPARSE_FILES |
4778 VOL_CAP_FMT_ZERO_RUNS |
4779 VOL_CAP_FMT_CASE_SENSITIVE |
4780 VOL_CAP_FMT_CASE_PRESERVING |
4781 VOL_CAP_FMT_FAST_STATFS |
4782 VOL_CAP_FMT_2TB_FILESIZE |
4783 VOL_CAP_FMT_OPENDENYMODES |
4784 VOL_CAP_FMT_HIDDEN_FILES |
4785 VOL_CAP_FMT_PATH_FROM_ID;
4786 cap->valid[VOL_CAPABILITIES_INTERFACES] =
4787 VOL_CAP_INT_SEARCHFS |
4788 VOL_CAP_INT_ATTRLIST |
4789 VOL_CAP_INT_NFSEXPORT |
4790 VOL_CAP_INT_READDIRATTR |
4791 VOL_CAP_INT_EXCHANGEDATA |
4792 VOL_CAP_INT_COPYFILE |
4793 VOL_CAP_INT_ALLOCATE |
4794 VOL_CAP_INT_VOL_RENAME |
4795 VOL_CAP_INT_ADVLOCK |
4796 VOL_CAP_INT_FLOCK |
4797 VOL_CAP_INT_MANLOCK |
4798 #if NAMEDSTREAMS
4799 VOL_CAP_INT_EXTENDED_ATTR |
4800 VOL_CAP_INT_NAMEDSTREAMS;
4801 #else
4802 VOL_CAP_INT_EXTENDED_ATTR;
4803 #endif
4804 cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
4805 cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
4806 VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
4807 }
4808 if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
4809 vol_attributes_attr_t *attrp = &fsap->f_attributes;
4810
4811 attrp->validattr.commonattr = HFS_ATTR_CMN_VALIDMASK;
4812 attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
4813 attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
4814 attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
4815 attrp->validattr.forkattr = 0;
4816
4817 attrp->nativeattr.commonattr = HFS_ATTR_CMN_VALIDMASK;
4818 attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
4819 attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
4820 attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
4821 attrp->nativeattr.forkattr = 0;
4822 VFSATTR_SET_SUPPORTED(fsap, f_attributes);
4823 }
4824 fsap->f_create_time.tv_sec = hfsmp->vcbCrDate;
4825 fsap->f_create_time.tv_nsec = 0;
4826 VFSATTR_SET_SUPPORTED(fsap, f_create_time);
4827 fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
4828 fsap->f_modify_time.tv_nsec = 0;
4829 VFSATTR_SET_SUPPORTED(fsap, f_modify_time);
4830
4831 fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
4832 fsap->f_backup_time.tv_nsec = 0;
4833 VFSATTR_SET_SUPPORTED(fsap, f_backup_time);
4834 if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
4835 u_int16_t subtype = 0;
4836
4837 /*
4838 * Subtypes (flavors) for HFS
4839 * 0: Mac OS Extended
4840 * 1: Mac OS Extended (Journaled)
4841 * 2: Mac OS Extended (Case Sensitive)
4842 * 3: Mac OS Extended (Case Sensitive, Journaled)
4843 * 4 - 127: Reserved
4844 * 128: Mac OS Standard
4845 *
4846 */
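/* The bits below compose these values; for example, a journaled, case-sensitive volume reports subtype 3. */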
4847 if (hfsmp->hfs_flags & HFS_STANDARD) {
4848 subtype = HFS_SUBTYPE_STANDARDHFS;
4849 } else /* HFS Plus */ {
4850 if (hfsmp->jnl)
4851 subtype |= HFS_SUBTYPE_JOURNALED;
4852 if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)
4853 subtype |= HFS_SUBTYPE_CASESENSITIVE;
4854 }
4855 fsap->f_fssubtype = subtype;
4856 VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
4857 }
4858
4859 if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
4860 strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
4861 VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
4862 }
4863 return (0);
4864 }
4865
4866 /*
4867 * Perform a volume rename. Requires the FS' root vp.
4868 */
4869 static int
4870 hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
4871 {
4872 ExtendedVCB *vcb = VTOVCB(vp);
4873 struct cnode *cp = VTOC(vp);
4874 struct hfsmount *hfsmp = VTOHFS(vp);
4875 struct cat_desc to_desc;
4876 struct cat_desc todir_desc;
4877 struct cat_desc new_desc;
4878 cat_cookie_t cookie;
4879 int lockflags;
4880 int error = 0;
4881
4882 /*
4883 * Ignore attempts to rename a volume to a zero-length name.
4884 */
4885 if (name[0] == 0)
4886 return(0);
4887
4888 bzero(&to_desc, sizeof(to_desc));
4889 bzero(&todir_desc, sizeof(todir_desc));
4890 bzero(&new_desc, sizeof(new_desc));
4891 bzero(&cookie, sizeof(cookie));
4892
4893 todir_desc.cd_parentcnid = kHFSRootParentID;
4894 todir_desc.cd_cnid = kHFSRootFolderID;
4895 todir_desc.cd_flags = CD_ISDIR;
4896
4897 to_desc.cd_nameptr = (const u_int8_t *)name;
4898 to_desc.cd_namelen = strlen(name);
4899 to_desc.cd_parentcnid = kHFSRootParentID;
4900 to_desc.cd_cnid = cp->c_cnid;
4901 to_desc.cd_flags = CD_ISDIR;
4902
4903 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)) == 0) {
4904 if ((error = hfs_start_transaction(hfsmp)) == 0) {
4905 if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
4906 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4907
4908 error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);
4909
4910 /*
4911 * If successful, update the name in the VCB, ensure it's terminated.
4912 */
4913 if (!error) {
4914 strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));
4915 }
4916
4917 hfs_systemfile_unlock(hfsmp, lockflags);
4918 cat_postflight(hfsmp, &cookie, p);
4919
4920 if (error)
4921 MarkVCBDirty(vcb);
4922 (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
4923 }
4924 hfs_end_transaction(hfsmp);
4925 }
4926 if (!error) {
4927 /* Release old allocated name buffer */
4928 if (cp->c_desc.cd_flags & CD_HASBUF) {
4929 const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;
4930
4931 cp->c_desc.cd_nameptr = 0;
4932 cp->c_desc.cd_namelen = 0;
4933 cp->c_desc.cd_flags &= ~CD_HASBUF;
4934 vfs_removename(tmp_name);
4935 }
4936 /* Update cnode's catalog descriptor */
4937 replace_desc(cp, &new_desc);
4938 vcb->volumeNameEncodingHint = new_desc.cd_encoding;
4939 cp->c_touch_chgtime = TRUE;
4940 }
4941
4942 hfs_unlock(cp);
4943 }
4944
4945 return(error);
4946 }
4947
4948 /*
4949 * Set file system attributes.
4950 */
4951 static int
4952 hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
4953 {
4954 kauth_cred_t cred = vfs_context_ucred(context);
4955 int error = 0;
4956
4957 /*
4958 * Must be superuser or owner of filesystem to change volume attributes
4959 */
4960 if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
4961 return(EACCES);
4962
4963 if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
4964 vnode_t root_vp;
4965
4966 error = hfs_vfs_root(mp, &root_vp, context);
4967 if (error)
4968 goto out;
4969
4970 error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
4971 (void) vnode_put(root_vp);
4972 if (error)
4973 goto out;
4974
4975 VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
4976 }
4977
4978 out:
4979 return error;
4980 }
4981
4982 /* If a runtime corruption is detected, set the volume inconsistent
4983 * bit in the volume attributes. The volume inconsistent bit is a persistent
4984 * bit which represents that the volume is corrupt and needs repair.
4985 * The volume inconsistent bit can be set from the kernel when it detects
4986 * runtime corruption or from file system repair utilities like fsck_hfs when
4987 * a repair operation fails. The bit should be cleared only from file system
4988 * verify/repair utility like fsck_hfs when a verify/repair succeeds.
4989 */
4990 void hfs_mark_volume_inconsistent(struct hfsmount *hfsmp)
4991 {
4992 HFS_MOUNT_LOCK(hfsmp, TRUE);
4993 if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
4994 hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
4995 MarkVCBDirty(hfsmp);
4996 }
4997 /* Log information to ASL log */
4998 fslog_fs_corrupt(hfsmp->hfs_mp);
4999 printf("HFS: Runtime corruption detected on %s, fsck will be forced on next mount.\n", hfsmp->vcbVN);
5000 HFS_MOUNT_UNLOCK(hfsmp, TRUE);
5001 }
5002
5003 /* Replay the journal on the device node provided. Returns zero if
5004 * journal replay succeeded or no journal was supposed to be replayed.
5005 */
5006 static int hfs_journal_replay(const char *devnode, vfs_context_t context)
5007 {
5008 int retval = 0;
5009 struct vnode *devvp = NULL;
5010 struct mount *mp = NULL;
5011 struct hfs_mount_args *args = NULL;
5012
5013 /* Lookup vnode for given raw device path */
5014 retval = vnode_open(devnode, FREAD|FWRITE, 0, 0, &devvp, NULL);
5015 if (retval) {
5016 goto out;
5017 }
5018
5019 /* Replay allowed only on raw devices */
5020 if (!vnode_ischr(devvp)) {
5021 retval = EINVAL;
5022 goto out;
5023 }
5024
5025 /* Create dummy mount structures */
5026 MALLOC(mp, struct mount *, sizeof(struct mount), M_TEMP, M_WAITOK);
5027 bzero(mp, sizeof(struct mount));
5028 mount_lock_init(mp);
5029
5030 MALLOC(args, struct hfs_mount_args *, sizeof(struct hfs_mount_args), M_TEMP, M_WAITOK);
5031 bzero(args, sizeof(struct hfs_mount_args));
5032
5033 retval = hfs_mountfs(devvp, mp, args, 1, context);
5034 buf_flushdirtyblks(devvp, MNT_WAIT, 0, "hfs_journal_replay");
5035
5036 out:
5037 if (mp) {
5038 mount_lock_destroy(mp);
5039 FREE(mp, M_TEMP);
5040 }
5041 if (args) {
5042 FREE(args, M_TEMP);
5043 }
5044 if (devvp) {
5045 vnode_close(devvp, FREAD|FWRITE, NULL);
5046 }
5047 return retval;
5048 }
5049
5050 /*
5051 * hfs vfs operations.
5052 */
5053 struct vfsops hfs_vfsops = {
5054 hfs_mount,
5055 hfs_start,
5056 hfs_unmount,
5057 hfs_vfs_root,
5058 hfs_quotactl,
5059 hfs_vfs_getattr, /* was hfs_statfs */
5060 hfs_sync,
5061 hfs_vfs_vget,
5062 hfs_fhtovp,
5063 hfs_vptofh,
5064 hfs_init,
5065 hfs_sysctl,
5066 hfs_vfs_setattr,
5067 {NULL}
5068 };