/* Source: apple/xnu (tag xnu-2782.30.5), bsd/hfs/hfs_vfsops.c */
1 /*
2 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1991, 1993, 1994
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * hfs_vfsops.c
66 * derived from @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
67 *
68 * (c) Copyright 1997-2002 Apple Computer, Inc. All rights reserved.
69 *
70 * hfs_vfsops.c -- VFS layer for loadable HFS file system.
71 *
72 */
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kauth.h>
76
77 #include <sys/ubc.h>
78 #include <sys/ubc_internal.h>
79 #include <sys/vnode_internal.h>
80 #include <sys/mount_internal.h>
81 #include <sys/sysctl.h>
82 #include <sys/malloc.h>
83 #include <sys/stat.h>
84 #include <sys/quota.h>
85 #include <sys/disk.h>
86 #include <sys/paths.h>
87 #include <sys/utfconv.h>
88 #include <sys/kdebug.h>
89 #include <sys/fslog.h>
90 #include <sys/ubc.h>
91 #include <sys/buf_internal.h>
92
93 /* for parsing boot-args */
94 #include <pexpert/pexpert.h>
95
96
97 #include <kern/locks.h>
98
99 #include <vfs/vfs_journal.h>
100
101 #include <miscfs/specfs/specdev.h>
102 #include <hfs/hfs_mount.h>
103
104 #include <libkern/crypto/md5.h>
105 #include <uuid/uuid.h>
106
107 #include "hfs.h"
108 #include "hfs_catalog.h"
109 #include "hfs_cnode.h"
110 #include "hfs_dbg.h"
111 #include "hfs_endian.h"
112 #include "hfs_hotfiles.h"
113 #include "hfs_quota.h"
114 #include "hfs_btreeio.h"
115 #include "hfs_kdebug.h"
116
117 #include "hfscommon/headers/FileMgrInternal.h"
118 #include "hfscommon/headers/BTreesInternal.h"
119
120 #if CONFIG_PROTECT
121 #include <sys/cprotect.h>
122 #endif
123
124 #define HFS_MOUNT_DEBUG 1
125
126 #if HFS_DIAGNOSTIC
127 int hfs_dbg_all = 0;
128 int hfs_dbg_err = 0;
129 #endif
130
131 /* Enable/disable debugging code for live volume resizing, defined in hfs_resize.c */
132 extern int hfs_resize_debug;
133
134 lck_grp_attr_t * hfs_group_attr;
135 lck_attr_t * hfs_lock_attr;
136 lck_grp_t * hfs_mutex_group;
137 lck_grp_t * hfs_rwlock_group;
138 lck_grp_t * hfs_spinlock_group;
139
140 extern struct vnodeopv_desc hfs_vnodeop_opv_desc;
141
142 #if CONFIG_HFS_STD
143 extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc;
144 static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);
145 #endif
146
147 /* not static so we can re-use in hfs_readwrite.c for build_path calls */
148 int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);
149
150 static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
151 static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
152 static int hfs_flushfiles(struct mount *, int, struct proc *);
153 static int hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp);
154 static int hfs_init(struct vfsconf *vfsp);
155 static void hfs_locks_destroy(struct hfsmount *hfsmp);
156 static int hfs_vfs_root(struct mount *mp, struct vnode **vpp, vfs_context_t context);
157 static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
158 static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
159 static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
160 static int hfs_journal_replay(vnode_t devvp, vfs_context_t context);
161 static void hfs_syncer_free(struct hfsmount *hfsmp);
162
163 void hfs_initialize_allocator (struct hfsmount *hfsmp);
164 int hfs_teardown_allocator (struct hfsmount *hfsmp);
165
166 int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context);
167 int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context);
168 int hfs_reload(struct mount *mp);
169 int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
170 int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
171 int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
172 user_addr_t newp, size_t newlen, vfs_context_t context);
173 int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);
174
175 /*
176 * Called by vfs_mountroot when mounting HFS Plus as root.
177 */
178
179 int
180 hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context)
181 {
182 struct hfsmount *hfsmp;
183 ExtendedVCB *vcb;
184 struct vfsstatfs *vfsp;
185 int error;
186
187 if ((error = hfs_mountfs(rvp, mp, NULL, 0, context))) {
188 if (HFS_MOUNT_DEBUG) {
189 printf("hfs_mountroot: hfs_mountfs returned %d, rvp (%p) name (%s) \n",
190 error, rvp, (rvp->v_name ? rvp->v_name : "unknown device"));
191 }
192 return (error);
193 }
194
195 /* Init hfsmp */
196 hfsmp = VFSTOHFS(mp);
197
198 hfsmp->hfs_uid = UNKNOWNUID;
199 hfsmp->hfs_gid = UNKNOWNGID;
200 hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
201 hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
202
203 /* Establish the free block reserve. */
204 vcb = HFSTOVCB(hfsmp);
205 vcb->reserveBlocks = ((u_int64_t)vcb->totalBlocks * HFS_MINFREE) / 100;
206 vcb->reserveBlocks = MIN(vcb->reserveBlocks, HFS_MAXRESERVE / vcb->blockSize);
207
208 vfsp = vfs_statfs(mp);
209 (void)hfs_statfs(mp, vfsp, NULL);
210
211 /* Invoke ioctl that asks if the underlying device is Core Storage or not */
212 error = VNOP_IOCTL(rvp, _DKIOCCORESTORAGE, NULL, 0, context);
213 if (error == 0) {
214 hfsmp->hfs_flags |= HFS_CS;
215 }
216 return (0);
217 }
218
219
220 /*
221 * VFS Operations.
222 *
223 * mount system call
224 */
225
/*
 * hfs_mount: VFS mount entry point for HFS / HFS Plus.
 *
 * Dispatches on the command flags of 'mp':
 *   - MNT_UPDATE | MNT_RELOAD: re-read in-core metadata after an fsck
 *     (permitted on read-only mounts only).
 *   - MNT_UPDATE: change mount parameters; handles the read-write ->
 *     read-only downgrade (sync, flush files, close journal) and the
 *     read-only -> read-write upgrade (reopen journal, orphan cleanup).
 *   - otherwise: a fresh mount, delegated to hfs_mountfs().
 *
 * 'data' is a user-space pointer to a struct hfs_mount_args; it is
 * copied in unconditionally, so callers must always supply it.
 * Returns 0 on success or a BSD errno.
 */
int
hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
{
	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp = NULL;
	struct hfs_mount_args args;
	int retval = E_NONE;
	u_int32_t cmdflags;

	if ((retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
		if (HFS_MOUNT_DEBUG) {
			printf("hfs_mount: copyin returned %d for fs\n", retval);
		}
		return (retval);
	}
	cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
	if (cmdflags & MNT_UPDATE) {
		hfsmp = VFSTOHFS(mp);

		/* Reload incore data after an fsck. */
		if (cmdflags & MNT_RELOAD) {
			if (vfs_isrdonly(mp)) {
				int error = hfs_reload(mp);
				if (error && HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_reload returned %d on %s \n", error, hfsmp->vcbVN);
				}
				return error;
			}
			else {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp->vcbVN);
				}
				return (EINVAL);
			}
		}

		/* Change to a read-only file system. */
		if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
		    vfs_isrdonly(mp)) {
			int flags;

			/* Set flag to indicate that a downgrade to read-only
			 * is in progress and therefore block any further
			 * modifications to the file system.
			 */
			hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
			hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
			hfsmp->hfs_downgrading_thread = current_thread();
			hfs_unlock_global (hfsmp);
			hfs_syncer_free(hfsmp);

			/* use VFS_SYNC to push out System (btree) files */
			retval = VFS_SYNC(mp, MNT_WAIT, context);
			if (retval && ((cmdflags & MNT_FORCE) == 0)) {
				/* Sync failed and the downgrade was not forced: back out. */
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			flags = WRITECLOSE;
			if (cmdflags & MNT_FORCE)
				flags |= FORCECLOSE;

			if ((retval = hfs_flushfiles(mp, flags, p))) {
				/* Could not flush open files: abandon the downgrade. */
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			/* mark the volume cleanly unmounted */
			hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
			retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
			hfsmp->hfs_flags |= HFS_READ_ONLY;

			/*
			 * Close down the journal.
			 *
			 * NOTE: It is critically important to close down the journal
			 * and have it issue all pending I/O prior to calling VNOP_FSYNC below.
			 * In a journaled environment it is expected that the journal be
			 * the only actor permitted to issue I/O for metadata blocks in HFS.
			 * If we were to call VNOP_FSYNC prior to closing down the journal,
			 * we would inadvertantly issue (and wait for) the I/O we just
			 * initiated above as part of the flushvolumeheader call.
			 *
			 * To avoid this, we follow the same order of operations as in
			 * unmount and issue the journal_close prior to calling VNOP_FSYNC.
			 */

			if (hfsmp->jnl) {
				hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);

				journal_close(hfsmp->jnl);
				hfsmp->jnl = NULL;

				// Note: we explicitly don't want to shutdown
				// access to the jvp because we may need
				// it later if we go back to being read-write.

				hfs_unlock_global (hfsmp);

				vfs_clearflags(hfsmp->hfs_mp, MNT_JOURNALED);
			}

			/*
			 * Write out any pending I/O still outstanding against the device node
			 * now that the journal has been closed.
			 */
			if (retval == 0) {
				vnode_get(hfsmp->hfs_devvp);
				retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
				vnode_put(hfsmp->hfs_devvp);
			}

			if (retval) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval, hfsmp->vcbVN);
				}
				/* Back out of the downgrade; the volume stays read-write. */
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				hfsmp->hfs_flags &= ~HFS_READ_ONLY;
				goto out;
			}

			/* The summary table only helps allocations; drop it for read-only. */
			if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
				if (hfsmp->hfs_summary_table) {
					int err = 0;
					/*
					 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
					 */
					if (hfsmp->hfs_allocation_vp) {
						err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
					}
					FREE (hfsmp->hfs_summary_table, M_TEMP);
					hfsmp->hfs_summary_table = NULL;
					hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;
					if (err == 0 && hfsmp->hfs_allocation_vp){
						hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
					}
				}
			}

			hfsmp->hfs_downgrading_thread = NULL;
		}

		/* Change to a writable file system. */
		if (vfs_iswriteupgrade(mp)) {
			/*
			 * On inconsistent disks, do not allow read-write mount
			 * unless it is the boot volume being mounted.
			 */
			if (!(vfs_flags(mp) & MNT_ROOTFS) &&
			    (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n", (hfsmp->vcbVN));
				}
				retval = EINVAL;
				goto out;
			}

			// If the journal was shut-down previously because we were
			// asked to be read-only, let's start it back up again now

			if ( (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
				&& hfsmp->jnl == NULL
				&& hfsmp->jvp != NULL) {
				int jflags;

				if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
					jflags = JOURNAL_RESET;
				} else {
					jflags = 0;
				}

				hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);

				/* We provide the mount point twice here: The first is used as
				 * an opaque argument to be passed back when hfs_sync_metadata
				 * is called. The second is provided to the throttling code to
				 * indicate which mount's device should be used when accounting
				 * for metadata writes.
				 */
				hfsmp->jnl = journal_open(hfsmp->jvp,
						(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
						hfsmp->jnl_size,
						hfsmp->hfs_devvp,
						hfsmp->hfs_logical_block_size,
						jflags,
						0,
						hfs_sync_metadata, hfsmp->hfs_mp,
						hfsmp->hfs_mp);

				/*
				 * Set up the trim callback function so that we can add
				 * recently freed extents to the free extent cache once
				 * the transaction that freed them is written to the
				 * journal on disk.
				 */
				if (hfsmp->jnl)
					journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);

				hfs_unlock_global (hfsmp);

				if (hfsmp->jnl == NULL) {
					if (HFS_MOUNT_DEBUG) {
						printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp->vcbVN));
					}
					retval = EINVAL;
					goto out;
				} else {
					hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
					vfs_setflags(hfsmp->hfs_mp, MNT_JOURNALED);
				}
			}

			/* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
			retval = hfs_erase_unused_nodes(hfsmp);
			if (retval != E_NONE) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			/* If this mount point was downgraded from read-write
			 * to read-only, clear that information as we are now
			 * moving back to read-write.
			 */
			hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
			hfsmp->hfs_downgrading_thread = NULL;

			/* mark the volume dirty (clear clean unmount bit) */
			hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;

			retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
			if (retval != E_NONE) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			/* Only clear HFS_READ_ONLY after a successful write */
			hfsmp->hfs_flags &= ~HFS_READ_ONLY;


			if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
				/* Setup private/hidden directories for hardlinks. */
				hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
				hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

				hfs_remove_orphans(hfsmp);

				/*
				 * Allow hot file clustering if conditions allow.
				 */
				if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) &&
				    ((hfsmp->hfs_mp->mnt_kern_flag & MNTK_SSD) == 0)) {
					(void) hfs_recording_init(hfsmp);
				}
				/* Force ACLs on HFS+ file systems. */
				if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
					vfs_setextendedsecurity(HFSTOVFS(hfsmp));
				}
			}
		}

		/* Update file system parameters. */
		retval = hfs_changefs(mp, &args);
		if (retval && HFS_MOUNT_DEBUG) {
			printf("hfs_mount: hfs_changefs returned %d for %s\n", retval, hfsmp->vcbVN);
		}

	} else /* not an update request */ {

		/* Set the mount flag to indicate that we support volfs */
		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));

		retval = hfs_mountfs(devvp, mp, &args, 0, context);
		if (retval) {
			const char *name = vnode_getname(devvp);
			printf("hfs_mount: hfs_mountfs returned error=%d for device %s\n", retval, (name ? name : "unknown-dev"));
			if (name) {
				vnode_putname(name);
			}
			goto out;
		}

		/* After hfs_mountfs succeeds, we should have valid hfsmp */
		hfsmp = VFSTOHFS(mp);

		/*
		 * Check to see if the file system exists on CoreStorage.
		 *
		 * This must be done after examining the root folder's CP EA since
		 * hfs_vfs_root will create a vnode (which must not occur until after
		 * we've established the CP level of the FS).
		 */
		if (retval == 0) {
			errno_t err;
			/* Invoke ioctl that asks if the underlying device is Core Storage or not */
			err = VNOP_IOCTL(devvp, _DKIOCCORESTORAGE, NULL, 0, context);
			if (err == 0) {
				hfsmp->hfs_flags |= HFS_CS;
			}
		}
	}

out:
	if (retval == 0) {
		/* Refresh statfs data so userland sees current numbers. */
		(void)hfs_statfs(mp, vfs_statfs(mp), context);
	}
	return (retval);
}
546
547
/*
 * Argument bundle passed from hfs_changefs() to hfs_changefs_callback()
 * through vnode_iterate().
 */
struct hfs_changefs_cargs {
	struct hfsmount *hfsmp;		/* mount whose vnodes are being visited */
	int		namefix;	/* non-zero: name encoding changed, refresh names */
	int		permfix;	/* non-zero: default uid/gid/mask changed */
	int		permswitch;	/* non-zero: MNT_UNKNOWNPERMISSIONS was toggled */
};
554
/*
 * Per-vnode worker for hfs_changefs(), run via vnode_iterate().
 *
 * Re-reads the cnode's catalog entry and, depending on which mount
 * parameters changed (see struct hfs_changefs_cargs), refreshes the
 * in-core uid/gid/mode and/or the encoding-dependent name/descriptor.
 * Always returns VNODE_RETURNED so the iteration continues.
 */
static int
hfs_changefs_callback(struct vnode *vp, void *cargs)
{
	ExtendedVCB *vcb;
	struct cnode *cp;
	struct cat_desc cndesc;
	struct cat_attr cnattr;
	struct hfs_changefs_cargs *args;
	int lockflags;
	int error;

	args = (struct hfs_changefs_cargs *)cargs;

	cp = VTOC(vp);
	vcb = HFSTOVCB(args->hfsmp);

	/* Look up the current on-disk catalog data for this cnode. */
	lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
	error = cat_lookup(args->hfsmp, &cp->c_desc, 0, 0, &cndesc, &cnattr, NULL, NULL);
	hfs_systemfile_unlock(args->hfsmp, lockflags);
	if (error) {
		/*
		 * If we couldn't find this guy skip to the next one
		 */
		if (args->namefix)
			cache_purge(vp);

		return (VNODE_RETURNED);
	}
	/*
	 * Get the real uid/gid and perm mask from disk.
	 */
	if (args->permswitch || args->permfix) {
		cp->c_uid = cnattr.ca_uid;
		cp->c_gid = cnattr.ca_gid;
		cp->c_mode = cnattr.ca_mode;
	}
	/*
	 * If we're switching name converters then...
	 *   Remove the existing entry from the namei cache.
	 *   Update name to one based on new encoder.
	 */
	if (args->namefix) {
		cache_purge(vp);
		replace_desc(cp, &cndesc);

		if (cndesc.cd_cnid == kHFSRootFolderID) {
			/* Keep the in-core volume name in sync with the root folder. */
			strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
			cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;
		}
	} else {
		/* replace_desc() did not consume the descriptor; release it. */
		cat_releasedesc(&cndesc);
	}
	return (VNODE_RETURNED);
}
609
/*
 * Change fs mount parameters (MNT_UPDATE path of hfs_mount).
 *
 * Applies timezone, default uid/gid/mask, unknown-permissions and
 * (HFS standard only) text-encoding changes from 'args' to the mount,
 * then walks every active vnode via hfs_changefs_callback() to bring
 * in-core cnodes in line with the new settings.  HFS_IN_CHANGEFS is
 * held set for the duration so other paths can detect the transition.
 * Returns 0 on success or a BSD errno.
 */
static int
hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
{
	int retval = 0;
	int namefix, permfix, permswitch;
	struct hfsmount *hfsmp;
	ExtendedVCB *vcb;
	struct hfs_changefs_cargs cargs;
	u_int32_t mount_flags;

#if CONFIG_HFS_STD
	u_int32_t old_encoding = 0;
	hfs_to_unicode_func_t get_unicode_func;
	unicode_to_hfs_func_t get_hfsname_func;
#endif

	hfsmp = VFSTOHFS(mp);
	vcb = HFSTOVCB(hfsmp);
	mount_flags = (unsigned int)vfs_flags(mp);

	hfsmp->hfs_flags |= HFS_IN_CHANGEFS;

	/* permswitch is set when MNT_UNKNOWNPERMISSIONS changes state. */
	permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
	               ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
	              (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
	               (mount_flags & MNT_UNKNOWNPERMISSIONS)));

	/* The root filesystem must operate with actual permissions: */
	if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
		vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS));	/* Just say "No". */
		retval = EINVAL;
		goto exit;
	}
	if (mount_flags & MNT_UNKNOWNPERMISSIONS)
		hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
	else
		hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;

	namefix = permfix = 0;

	/*
	 * Tracking of hot files requires up-to-date access times. So if
	 * access time updates are disabled, we must also disable hot files.
	 */
	if (mount_flags & MNT_NOATIME) {
		(void) hfs_recording_suspend(hfsmp);
	}

	/* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
	if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
		gTimeZone = args->hfs_timezone;
	}

	/* Change the default uid, gid and/or mask */
	if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
		hfsmp->hfs_uid = args->hfs_uid;
		if (vcb->vcbSigWord == kHFSPlusSigWord)
			++permfix;
	}
	if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
		hfsmp->hfs_gid = args->hfs_gid;
		if (vcb->vcbSigWord == kHFSPlusSigWord)
			++permfix;
	}
	if (args->hfs_mask != (mode_t)VNOVAL) {
		if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
			hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
			hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
			/* Optionally strip execute bits from files. */
			if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
				hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
			if (vcb->vcbSigWord == kHFSPlusSigWord)
				++permfix;
		}
	}

#if CONFIG_HFS_STD
	/* Change the hfs encoding value (hfs only) */
	if ((vcb->vcbSigWord == kHFSSigWord) &&
	    (args->hfs_encoding != (u_int32_t)VNOVAL) &&
	    (hfsmp->hfs_encoding != args->hfs_encoding)) {

		retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);
		if (retval)
			goto exit;

		/*
		 * Connect the new hfs_get_unicode converter but leave
		 * the old hfs_get_hfsname converter in place so that
		 * we can lookup existing vnodes to get their correctly
		 * encoded names.
		 *
		 * When we're all finished, we can then connect the new
		 * hfs_get_hfsname converter and release our interest
		 * in the old converters.
		 */
		hfsmp->hfs_get_unicode = get_unicode_func;
		old_encoding = hfsmp->hfs_encoding;
		hfsmp->hfs_encoding = args->hfs_encoding;
		++namefix;
	}
#endif

	/* Nothing changed that requires a vnode walk; done. */
	if (!(namefix || permfix || permswitch))
		goto exit;

	/* XXX 3762912 hack to support HFS filesystem 'owner' */
	if (permfix)
		vfs_setowner(mp,
		    hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
		    hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);

	/*
	 * For each active vnode fix things that changed
	 *
	 * Note that we can visit a vnode more than once
	 * and we can race with fsync.
	 *
	 * hfs_changefs_callback will be called for each vnode
	 * hung off of this mount point
	 *
	 * The vnode will be properly referenced and unreferenced
	 * around the callback
	 */
	cargs.hfsmp = hfsmp;
	cargs.namefix = namefix;
	cargs.permfix = permfix;
	cargs.permswitch = permswitch;

	vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);

#if CONFIG_HFS_STD
	/*
	 * If we're switching name converters we can now
	 * connect the new hfs_get_hfsname converter and
	 * release our interest in the old converters.
	 */
	if (namefix) {
		/* HFS standard only */
		hfsmp->hfs_get_hfsname = get_hfsname_func;
		vcb->volumeNameEncodingHint = args->hfs_encoding;
		(void) hfs_relconverter(old_encoding);
	}
#endif

exit:
	hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
	return (retval);
}
759
760
/*
 * Argument bundle passed from hfs_reload() to hfs_reload_callback()
 * through vnode_iterate(); 'error' carries a failure back to the caller.
 */
struct hfs_reload_cargs {
	struct hfsmount *hfsmp;		/* mount being reloaded */
	int		error;		/* first catalog-lookup error, or 0 */
};
765
766 static int
767 hfs_reload_callback(struct vnode *vp, void *cargs)
768 {
769 struct cnode *cp;
770 struct hfs_reload_cargs *args;
771 int lockflags;
772
773 args = (struct hfs_reload_cargs *)cargs;
774 /*
775 * flush all the buffers associated with this node
776 */
777 (void) buf_invalidateblks(vp, 0, 0, 0);
778
779 cp = VTOC(vp);
780 /*
781 * Remove any directory hints
782 */
783 if (vnode_isdir(vp))
784 hfs_reldirhints(cp, 0);
785
786 /*
787 * Re-read cnode data for all active vnodes (non-metadata files).
788 */
789 if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp) && (cp->c_fileid >= kHFSFirstUserCatalogNodeID)) {
790 struct cat_fork *datafork;
791 struct cat_desc desc;
792
793 datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;
794
795 /* lookup by fileID since name could have changed */
796 lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
797 args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, 0, &desc, &cp->c_attr, datafork);
798 hfs_systemfile_unlock(args->hfsmp, lockflags);
799 if (args->error) {
800 return (VNODE_RETURNED_DONE);
801 }
802
803 /* update cnode's catalog descriptor */
804 (void) replace_desc(cp, &desc);
805 }
806 return (VNODE_RETURNED);
807 }
808
809 /*
810 * Reload all incore data for a filesystem (used after running fsck on
811 * the root filesystem and finding things to fix). The filesystem must
812 * be mounted read-only.
813 *
814 * Things to do to update the mount:
815 * invalidate all cached meta-data.
816 * invalidate all inactive vnodes.
817 * invalidate all cached file data.
818 * re-read volume header from disk.
819 * re-load meta-file info (extents, file size).
820 * re-load B-tree header data.
821 * re-read cnode data for all active vnodes.
822 */
int
hfs_reload(struct mount *mountp)
{
	register struct vnode *devvp;
	struct buf *bp;
	int error, i;
	struct hfsmount *hfsmp;
	struct HFSPlusVolumeHeader *vhp;
	ExtendedVCB *vcb;
	struct filefork *forkp;
	struct cat_desc cndesc;
	struct hfs_reload_cargs args;
	daddr64_t priIDSector;

	hfsmp = VFSTOHFS(mountp);
	vcb = HFSTOVCB(hfsmp);

	if (vcb->vcbSigWord == kHFSSigWord)
		return (EINVAL);	/* rooting from HFS is not supported! */

	/*
	 * Invalidate all cached meta-data.
	 */
	devvp = hfsmp->hfs_devvp;
	if (buf_invalidateblks(devvp, 0, 0, 0))
		panic("hfs_reload: dirty1");

	args.hfsmp = hfsmp;
	args.error = 0;
	/*
	 * hfs_reload_callback will be called for each vnode
	 * hung off of this mount point that can't be recycled...
	 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
	 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
	 * properly referenced and unreferenced around the callback
	 */
	vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);

	if (args.error)
		return (args.error);

	/*
	 * Re-read VolumeHeader from disk.
	 */
	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
			HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));

	error = (int)buf_meta_bread(hfsmp->hfs_devvp,
			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
			hfsmp->hfs_physical_block_size, NOCRED, &bp);
	if (error) {
		if (bp != NULL)
			buf_brelse(bp);
		return (error);
	}

	vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));

	/* Do a quick sanity check */
	if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
	     SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
	    (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
	     SWAP_BE16(vhp->version) != kHFSXVersion) ||
	    SWAP_BE32(vhp->blockSize) != vcb->blockSize) {
		buf_brelse(bp);
		return (EIO);
	}

	/* On-disk volume header fields are big-endian, hence the SWAP_BE* calls. */
	vcb->vcbLsMod = to_bsd_time(SWAP_BE32(vhp->modifyDate));
	vcb->vcbAtrb = SWAP_BE32 (vhp->attributes);
	vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock);
	vcb->vcbClpSiz = SWAP_BE32 (vhp->rsrcClumpSize);
	vcb->vcbNxtCNID = SWAP_BE32 (vhp->nextCatalogID);
	vcb->vcbVolBkUp = to_bsd_time(SWAP_BE32(vhp->backupDate));
	vcb->vcbWrCnt = SWAP_BE32 (vhp->writeCount);
	vcb->vcbFilCnt = SWAP_BE32 (vhp->fileCount);
	vcb->vcbDirCnt = SWAP_BE32 (vhp->folderCount);
	HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
	vcb->totalBlocks = SWAP_BE32 (vhp->totalBlocks);
	vcb->freeBlocks = SWAP_BE32 (vhp->freeBlocks);
	vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap);
	bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
	vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */

	/*
	 * Re-load meta-file vnode data (extent info, file size, etc).
	 */
	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
	}
	forkp->ff_size = SWAP_BE64 (vhp->extentsFile.logicalSize);
	forkp->ff_blocks = SWAP_BE32 (vhp->extentsFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);


	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
	}
	forkp->ff_size = SWAP_BE64 (vhp->catalogFile.logicalSize);
	forkp->ff_blocks = SWAP_BE32 (vhp->catalogFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);

	/* The attributes B-tree is optional; only reload it if present. */
	if (hfsmp->hfs_attribute_vp) {
		forkp = VTOF(hfsmp->hfs_attribute_vp);
		for (i = 0; i < kHFSPlusExtentDensity; i++) {
			forkp->ff_extents[i].startBlock =
				SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
			forkp->ff_extents[i].blockCount =
				SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
		}
		forkp->ff_size = SWAP_BE64 (vhp->attributesFile.logicalSize);
		forkp->ff_blocks = SWAP_BE32 (vhp->attributesFile.totalBlocks);
		forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);
	}

	forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
	}
	forkp->ff_size = SWAP_BE64 (vhp->allocationFile.logicalSize);
	forkp->ff_blocks = SWAP_BE32 (vhp->allocationFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);

	buf_brelse(bp);
	vhp = NULL;

	/*
	 * Re-load B-tree header data
	 */
	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
		return (error);

	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
		return (error);

	if (hfsmp->hfs_attribute_vp) {
		forkp = VTOF(hfsmp->hfs_attribute_vp);
		if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
			return (error);
	}

	/* Reload the volume name */
	if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, NULL, NULL)))
		return (error);
	vcb->volumeNameEncodingHint = cndesc.cd_encoding;
	bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
	cat_releasedesc(&cndesc);

	/* Re-establish private/hidden directories. */
	hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
	hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

	/* In case any volume information changed to trigger a notification */
	hfs_generate_volume_notifications(hfsmp);

	return (0);
}
993
/*
 * Convert a timeval into an absolute count of microseconds.
 *
 * Used by the KERNEL_DEBUG_CONSTANT tracepoints in hfs_syncer below,
 * which may be compiled out in some configurations — hence __unused.
 * The input is not modified, so the parameter is const-qualified.
 */
__unused
static uint64_t tv_to_usecs(const struct timeval *tv)
{
	return tv->tv_sec * 1000000ULL + tv->tv_usec;
}
999
// Returns TRUE if b - a >= usecs
//
// In the normal case a <= b (a is an earlier sample of the same uptime
// clock).  If the interval comes out negative (b < a), report that the
// interval has NOT elapsed: without this guard, a negative diff.tv_sec
// multiplied by 1000000ULL wraps to a huge unsigned value and the
// function would spuriously return TRUE.
static boolean_t hfs_has_elapsed (const struct timeval *a,
                                  const struct timeval *b,
                                  uint64_t usecs)
{
	struct timeval diff;
	timersub(b, a, &diff);
	/* timersub normalizes: tv_usec is in [0, 999999] and any negative
	 * result is carried entirely in tv_sec. */
	if (diff.tv_sec < 0)
		return FALSE;
	return diff.tv_sec * 1000000ULL + diff.tv_usec >= usecs;
}
1009
/*
 * hfs_syncer - deferred metadata-flush callback for a mounted HFS volume.
 *
 * Runs as a thread_call (arg0 is the struct hfsmount; the second argument
 * is unused).  Decides whether enough time has passed to flush dirty
 * metadata now, or re-arms itself for later.  Cooperates with hfs_unmount
 * via hfsmp->hfs_syncer (cleared by unmount), hfs_sync_incomplete, and the
 * syncer lock/wakeup; the ordering of lock, unlock, and wakeup calls below
 * is load-bearing — be careful when modifying.
 */
static void
hfs_syncer(void *arg0, void *unused)
{
#pragma unused(unused)

	struct hfsmount *hfsmp = arg0;
	struct timeval now;

	microuptime(&now);

	KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_START, hfsmp,
	                      tv_to_usecs(&now),
	                      tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
	                      hfsmp->hfs_mp->mnt_pending_write_size, 0);

	hfs_syncer_lock(hfsmp);

	if (!hfsmp->hfs_syncer) {
		// hfs_unmount is waiting for us: leave now and let it do the sync.
		// Clear the incomplete flag and wake the unmount thread up.
		hfsmp->hfs_sync_incomplete = FALSE;
		hfs_syncer_unlock(hfsmp);
		hfs_syncer_wakeup(hfsmp);
		return;
	}

	/* Check to see whether we should flush now: either the oldest is
	   > HFS_MAX_META_DELAY or HFS_META_DELAY has elapsed since the
	   request and there are no pending writes. */

	boolean_t flush_now = FALSE;

	if (hfs_has_elapsed(&hfsmp->hfs_sync_req_oldest, &now, HFS_MAX_META_DELAY))
		flush_now = TRUE;
	else if (!hfsmp->hfs_mp->mnt_pending_write_size) {
		/* N.B. accessing mnt_last_write_completed_timestamp is not thread safe, but
		   it won't matter for what we're using it for. */
		if (hfs_has_elapsed(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp,
		                    &now,
		                    HFS_META_DELAY)) {
			flush_now = TRUE;
		}
	}

	if (!flush_now) {
		/* Not time yet: re-arm the thread call and try again later.
		   Capture the thread_call pointer while still under the lock. */
		thread_call_t syncer = hfsmp->hfs_syncer;

		hfs_syncer_unlock(hfsmp);

		hfs_syncer_queue(syncer);

		return;
	}

	/* Committed to flushing: clear the oldest-request timestamp (new
	   requests arriving during the flush will set it again, handled at
	   the bottom of this function). */
	timerclear(&hfsmp->hfs_sync_req_oldest);

	hfs_syncer_unlock(hfsmp);

	KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_START,
	                      tv_to_usecs(&now),
	                      tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
	                      tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp),
	                      hfsmp->hfs_mp->mnt_pending_write_size, 0);

	/* NOTE(review): this early return leaves hfs_sync_incomplete set and
	   skips the wakeup at the bottom — presumably this "can't happen" in
	   practice since only one syncer callback is scheduled at a time;
	   confirm before relying on it. */
	if (hfsmp->hfs_syncer_thread) {
		printf("hfs: syncer already running!\n");
		return;
	}

	hfsmp->hfs_syncer_thread = current_thread();

	if (hfs_start_transaction(hfsmp) != 0) // so we hold off any new writes
		goto out;

	/*
	 * We intentionally do a synchronous flush (of the journal or entire volume) here.
	 * For journaled volumes, this means we wait until the metadata blocks are written
	 * to both the journal and their final locations (in the B-trees, etc.).
	 *
	 * This tends to avoid interleaving the metadata writes with other writes (for
	 * example, user data, or to the journal when a later transaction notices that
	 * an earlier transaction has finished its async writes, and then updates the
	 * journal start in the journal header). Avoiding interleaving of writes is
	 * very good for performance on simple flash devices like SD cards, thumb drives;
	 * and on devices like floppies. Since removable devices tend to be this kind of
	 * simple device, doing a synchronous flush actually improves performance in
	 * practice.
	 *
	 * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
	 * user data to be written.
	 */
	if (hfsmp->jnl) {
		hfs_journal_flush(hfsmp, TRUE);
	} else {
		hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel());
	}

	/* Comma operator: re-sample 'now' after the flush, then log it. */
	KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_END,
	                      (microuptime(&now), tv_to_usecs(&now)),
	                      tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
	                      tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp),
	                      hfsmp->hfs_mp->mnt_pending_write_size, 0);

	hfs_end_transaction(hfsmp);

out:

	hfsmp->hfs_syncer_thread = NULL;

	hfs_syncer_lock(hfsmp);

	// If hfs_unmount lets us and we missed a sync, schedule again
	if (hfsmp->hfs_syncer && timerisset(&hfsmp->hfs_sync_req_oldest)) {
		thread_call_t syncer = hfsmp->hfs_syncer;

		hfs_syncer_unlock(hfsmp);

		hfs_syncer_queue(syncer);
	} else {
		// Either unmount cleared hfs_syncer, or nothing new arrived:
		// mark the sync complete and wake any waiter (hfs_unmount).
		hfsmp->hfs_sync_incomplete = FALSE;
		hfs_syncer_unlock(hfsmp);
		hfs_syncer_wakeup(hfsmp);
	}

	/* BE CAREFUL WHAT YOU ADD HERE: at this point hfs_unmount is free
	   to continue and therefore hfsmp might be invalid. */

	KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
1138
1139
/* Returns non-zero if the named character device is ejectable media.
   NOTE(review): prototype only — implementation lives outside this file
   (IOKit/BSD glue); confirm header ownership before moving. */
extern int IOBSDIsMediaEjectable( const char *cdev_name );
1141
1142 /*
1143 * Call into the allocator code and perform a full scan of the bitmap file.
1144 *
1145 * This allows us to TRIM unallocated ranges if needed, and also to build up
1146 * an in-memory summary table of the state of the allocated blocks.
1147 */
1148 void hfs_scan_blocks (struct hfsmount *hfsmp) {
1149 /*
1150 * Take the allocation file lock. Journal transactions will block until
1151 * we're done here.
1152 */
1153
1154 int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
1155
1156 /*
1157 * We serialize here with the HFS mount lock as we're mounting.
1158 *
1159 * The mount can only proceed once this thread has acquired the bitmap
1160 * lock, since we absolutely do not want someone else racing in and
1161 * getting the bitmap lock, doing a read/write of the bitmap file,
1162 * then us getting the bitmap lock.
1163 *
1164 * To prevent this, the mount thread takes the HFS mount mutex, starts us
1165 * up, then immediately msleeps on the scan_var variable in the mount
1166 * point as a condition variable. This serialization is safe since
1167 * if we race in and try to proceed while they're still holding the lock,
1168 * we'll block trying to acquire the global lock. Since the mount thread
1169 * acquires the HFS mutex before starting this function in a new thread,
1170 * any lock acquisition on our part must be linearizably AFTER the mount thread's.
1171 *
1172 * Note that the HFS mount mutex is always taken last, and always for only
1173 * a short time. In this case, we just take it long enough to mark the
1174 * scan-in-flight bit.
1175 */
1176 (void) hfs_lock_mount (hfsmp);
1177 hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_INFLIGHT;
1178 wakeup((caddr_t) &hfsmp->scan_var);
1179 hfs_unlock_mount (hfsmp);
1180
1181 /* Initialize the summary table */
1182 if (hfs_init_summary (hfsmp)) {
1183 printf("hfs: could not initialize summary table for %s\n", hfsmp->vcbVN);
1184 }
1185
1186 /*
1187 * ScanUnmapBlocks assumes that the bitmap lock is held when you
1188 * call the function. We don't care if there were any errors issuing unmaps.
1189 *
1190 * It will also attempt to build up the summary table for subsequent
1191 * allocator use, as configured.
1192 */
1193 (void) ScanUnmapBlocks(hfsmp);
1194
1195 hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_COMPLETED;
1196
1197 hfs_systemfile_unlock(hfsmp, flags);
1198 }
1199
/* Set to 1 during hfs_mountfs when the root volume's header had the
 * kHFSVolumeUnmountedMask attribute bit set (i.e. it was unmounted
 * cleanly last time).  Exported read-only to user space as
 * vfs.generic.root_unmounted_cleanly. */
static int hfs_root_unmounted_cleanly = 0;

SYSCTL_DECL(_vfs_generic);
SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &hfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
1204
1205 /*
1206 * Common code for mount and mountroot
1207 */
1208 int
1209 hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
1210 int journal_replay_only, vfs_context_t context)
1211 {
1212 struct proc *p = vfs_context_proc(context);
1213 int retval = E_NONE;
1214 struct hfsmount *hfsmp = NULL;
1215 struct buf *bp;
1216 dev_t dev;
1217 HFSMasterDirectoryBlock *mdbp = NULL;
1218 int ronly;
1219 #if QUOTA
1220 int i;
1221 #endif
1222 int mntwrapper;
1223 kauth_cred_t cred;
1224 u_int64_t disksize;
1225 daddr64_t log_blkcnt;
1226 u_int32_t log_blksize;
1227 u_int32_t phys_blksize;
1228 u_int32_t minblksize;
1229 u_int32_t iswritable;
1230 daddr64_t mdb_offset;
1231 int isvirtual = 0;
1232 int isroot = 0;
1233 u_int32_t device_features = 0;
1234 int isssd;
1235
1236 if (args == NULL) {
1237 /* only hfs_mountroot passes us NULL as the 'args' argument */
1238 isroot = 1;
1239 }
1240
1241 ronly = vfs_isrdonly(mp);
1242 dev = vnode_specrdev(devvp);
1243 cred = p ? vfs_context_ucred(context) : NOCRED;
1244 mntwrapper = 0;
1245
1246 bp = NULL;
1247 hfsmp = NULL;
1248 mdbp = NULL;
1249 minblksize = kHFSBlockSize;
1250
1251 /* Advisory locking should be handled at the VFS layer */
1252 vfs_setlocklocal(mp);
1253
1254 /* Get the logical block size (treated as physical block size everywhere) */
1255 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
1256 if (HFS_MOUNT_DEBUG) {
1257 printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n");
1258 }
1259 retval = ENXIO;
1260 goto error_exit;
1261 }
1262 if (log_blksize == 0 || log_blksize > 1024*1024*1024) {
1263 printf("hfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize);
1264 retval = ENXIO;
1265 goto error_exit;
1266 }
1267
1268 /* Get the physical block size. */
1269 retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
1270 if (retval) {
1271 if ((retval != ENOTSUP) && (retval != ENOTTY)) {
1272 if (HFS_MOUNT_DEBUG) {
1273 printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n");
1274 }
1275 retval = ENXIO;
1276 goto error_exit;
1277 }
1278 /* If device does not support this ioctl, assume that physical
1279 * block size is same as logical block size
1280 */
1281 phys_blksize = log_blksize;
1282 }
1283 if (phys_blksize == 0 || phys_blksize > MAXBSIZE) {
1284 printf("hfs: physical block size 0x%x looks bad. Not mounting.\n", phys_blksize);
1285 retval = ENXIO;
1286 goto error_exit;
1287 }
1288
1289 /* Switch to 512 byte sectors (temporarily) */
1290 if (log_blksize > 512) {
1291 u_int32_t size512 = 512;
1292
1293 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
1294 if (HFS_MOUNT_DEBUG) {
1295 printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n");
1296 }
1297 retval = ENXIO;
1298 goto error_exit;
1299 }
1300 }
1301 /* Get the number of 512 byte physical blocks. */
1302 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1303 /* resetting block size may fail if getting block count did */
1304 (void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
1305 if (HFS_MOUNT_DEBUG) {
1306 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n");
1307 }
1308 retval = ENXIO;
1309 goto error_exit;
1310 }
1311 /* Compute an accurate disk size (i.e. within 512 bytes) */
1312 disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;
1313
1314 /*
1315 * On Tiger it is not necessary to switch the device
1316 * block size to be 4k if there are more than 31-bits
1317 * worth of blocks but to insure compatibility with
1318 * pre-Tiger systems we have to do it.
1319 *
1320 * If the device size is not a multiple of 4K (8 * 512), then
1321 * switching the logical block size isn't going to help because
1322 * we will be unable to write the alternate volume header.
1323 * In this case, just leave the logical block size unchanged.
1324 */
1325 if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
1326 minblksize = log_blksize = 4096;
1327 if (phys_blksize < log_blksize)
1328 phys_blksize = log_blksize;
1329 }
1330
1331 /*
1332 * The cluster layer is not currently prepared to deal with a logical
1333 * block size larger than the system's page size. (It can handle
1334 * blocks per page, but not multiple pages per block.) So limit the
1335 * logical block size to the page size.
1336 */
1337 if (log_blksize > PAGE_SIZE) {
1338 log_blksize = PAGE_SIZE;
1339 }
1340
1341 /* Now switch to our preferred physical block size. */
1342 if (log_blksize > 512) {
1343 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1344 if (HFS_MOUNT_DEBUG) {
1345 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n");
1346 }
1347 retval = ENXIO;
1348 goto error_exit;
1349 }
1350 /* Get the count of physical blocks. */
1351 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1352 if (HFS_MOUNT_DEBUG) {
1353 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n");
1354 }
1355 retval = ENXIO;
1356 goto error_exit;
1357 }
1358 }
1359 /*
1360 * At this point:
1361 * minblksize is the minimum physical block size
1362 * log_blksize has our preferred physical block size
1363 * log_blkcnt has the total number of physical blocks
1364 */
1365
1366 mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize);
1367 if ((retval = (int)buf_meta_bread(devvp,
1368 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)),
1369 phys_blksize, cred, &bp))) {
1370 if (HFS_MOUNT_DEBUG) {
1371 printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval);
1372 }
1373 goto error_exit;
1374 }
1375 MALLOC(mdbp, HFSMasterDirectoryBlock *, kMDBSize, M_TEMP, M_WAITOK);
1376 if (mdbp == NULL) {
1377 retval = ENOMEM;
1378 if (HFS_MOUNT_DEBUG) {
1379 printf("hfs_mountfs: MALLOC failed\n");
1380 }
1381 goto error_exit;
1382 }
1383 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize);
1384 buf_brelse(bp);
1385 bp = NULL;
1386
1387 MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK);
1388 if (hfsmp == NULL) {
1389 if (HFS_MOUNT_DEBUG) {
1390 printf("hfs_mountfs: MALLOC (2) failed\n");
1391 }
1392 retval = ENOMEM;
1393 goto error_exit;
1394 }
1395 bzero(hfsmp, sizeof(struct hfsmount));
1396
1397 hfs_chashinit_finish(hfsmp);
1398
1399 /* Init the ID lookup hashtable */
1400 hfs_idhash_init (hfsmp);
1401
1402 /*
1403 * See if the disk supports unmap (trim).
1404 *
1405 * NOTE: vfs_init_io_attributes has not been called yet, so we can't use the io_flags field
1406 * returned by vfs_ioattr. We need to call VNOP_IOCTL ourselves.
1407 */
1408 if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&device_features, 0, context) == 0) {
1409 if (device_features & DK_FEATURE_UNMAP) {
1410 hfsmp->hfs_flags |= HFS_UNMAP;
1411 }
1412 }
1413
1414 /*
1415 * See if the disk is a solid state device, too. We need this to decide what to do about
1416 * hotfiles.
1417 */
1418 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, context) == 0) {
1419 if (isssd) {
1420 hfsmp->hfs_flags |= HFS_SSD;
1421 }
1422 }
1423
1424
1425 /*
1426 * Init the volume information structure
1427 */
1428
1429 lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr);
1430 lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr);
1431 lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr);
1432 lck_spin_init(&hfsmp->vcbFreeExtLock, hfs_spinlock_group, hfs_lock_attr);
1433
1434 vfs_setfsprivate(mp, hfsmp);
1435 hfsmp->hfs_mp = mp; /* Make VFSTOHFS work */
1436 hfsmp->hfs_raw_dev = vnode_specrdev(devvp);
1437 hfsmp->hfs_devvp = devvp;
1438 vnode_ref(devvp); /* Hold a ref on the device, dropped when hfsmp is freed. */
1439 hfsmp->hfs_logical_block_size = log_blksize;
1440 hfsmp->hfs_logical_block_count = log_blkcnt;
1441 hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1442 hfsmp->hfs_physical_block_size = phys_blksize;
1443 hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize);
1444 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1445 if (ronly)
1446 hfsmp->hfs_flags |= HFS_READ_ONLY;
1447 if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS)
1448 hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
1449
1450 #if QUOTA
1451 for (i = 0; i < MAXQUOTAS; i++)
1452 dqfileinit(&hfsmp->hfs_qfiles[i]);
1453 #endif
1454
1455 if (args) {
1456 hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid;
1457 if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID;
1458 hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? UNKNOWNGID : args->hfs_gid;
1459 if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID;
1460 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
1461 if (args->hfs_mask != (mode_t)VNOVAL) {
1462 hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
1463 if (args->flags & HFSFSMNT_NOXONFILES) {
1464 hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
1465 } else {
1466 hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
1467 }
1468 } else {
1469 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1470 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1471 }
1472 if ((args->flags != (int)VNOVAL) && (args->flags & HFSFSMNT_WRAPPER))
1473 mntwrapper = 1;
1474 } else {
1475 /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1476 if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) {
1477 hfsmp->hfs_uid = UNKNOWNUID;
1478 hfsmp->hfs_gid = UNKNOWNGID;
1479 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
1480 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1481 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1482 }
1483 }
1484
1485 /* Find out if disk media is writable. */
1486 if (VNOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, context) == 0) {
1487 if (iswritable)
1488 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1489 else
1490 hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1491 }
1492
1493 // record the current time at which we're mounting this volume
1494 struct timeval tv;
1495 microtime(&tv);
1496 hfsmp->hfs_mount_time = tv.tv_sec;
1497
1498 /* Mount a standard HFS disk */
1499 if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) &&
1500 (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) {
1501 #if CONFIG_HFS_STD
1502 /* On 10.6 and beyond, non read-only mounts for HFS standard vols get rejected */
1503 if (vfs_isrdwr(mp)) {
1504 retval = EROFS;
1505 goto error_exit;
1506 }
1507
1508 printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n");
1509
1510 /* Treat it as if it's read-only and not writeable */
1511 hfsmp->hfs_flags |= HFS_READ_ONLY;
1512 hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1513
1514 /* If only journal replay is requested, exit immediately */
1515 if (journal_replay_only) {
1516 retval = 0;
1517 goto error_exit;
1518 }
1519
1520 if ((vfs_flags(mp) & MNT_ROOTFS)) {
1521 retval = EINVAL; /* Cannot root from HFS standard disks */
1522 goto error_exit;
1523 }
1524 /* HFS disks can only use 512 byte physical blocks */
1525 if (log_blksize > kHFSBlockSize) {
1526 log_blksize = kHFSBlockSize;
1527 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1528 retval = ENXIO;
1529 goto error_exit;
1530 }
1531 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1532 retval = ENXIO;
1533 goto error_exit;
1534 }
1535 hfsmp->hfs_logical_block_size = log_blksize;
1536 hfsmp->hfs_logical_block_count = log_blkcnt;
1537 hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1538 hfsmp->hfs_physical_block_size = log_blksize;
1539 hfsmp->hfs_log_per_phys = 1;
1540 }
1541 if (args) {
1542 hfsmp->hfs_encoding = args->hfs_encoding;
1543 HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding;
1544
1545 /* establish the timezone */
1546 gTimeZone = args->hfs_timezone;
1547 }
1548
1549 retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode,
1550 &hfsmp->hfs_get_hfsname);
1551 if (retval)
1552 goto error_exit;
1553
1554 retval = hfs_MountHFSVolume(hfsmp, mdbp, p);
1555 if (retval)
1556 (void) hfs_relconverter(hfsmp->hfs_encoding);
1557 #else
1558 /* On platforms where HFS Standard is not supported, deny the mount altogether */
1559 retval = EINVAL;
1560 goto error_exit;
1561 #endif
1562
1563 }
1564 else { /* Mount an HFS Plus disk */
1565 HFSPlusVolumeHeader *vhp;
1566 off_t embeddedOffset;
1567 int jnl_disable = 0;
1568
1569 /* Get the embedded Volume Header */
1570 if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
1571 embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize;
1572 embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) *
1573 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1574
1575 /*
1576 * If the embedded volume doesn't start on a block
1577 * boundary, then switch the device to a 512-byte
1578 * block size so everything will line up on a block
1579 * boundary.
1580 */
1581 if ((embeddedOffset % log_blksize) != 0) {
1582 printf("hfs_mountfs: embedded volume offset not"
1583 " a multiple of physical block size (%d);"
1584 " switching to 512\n", log_blksize);
1585 log_blksize = 512;
1586 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE,
1587 (caddr_t)&log_blksize, FWRITE, context)) {
1588
1589 if (HFS_MOUNT_DEBUG) {
1590 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n");
1591 }
1592 retval = ENXIO;
1593 goto error_exit;
1594 }
1595 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT,
1596 (caddr_t)&log_blkcnt, 0, context)) {
1597 if (HFS_MOUNT_DEBUG) {
1598 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n");
1599 }
1600 retval = ENXIO;
1601 goto error_exit;
1602 }
1603 /* Note: relative block count adjustment */
1604 hfsmp->hfs_logical_block_count *=
1605 hfsmp->hfs_logical_block_size / log_blksize;
1606
1607 /* Update logical /physical block size */
1608 hfsmp->hfs_logical_block_size = log_blksize;
1609 hfsmp->hfs_physical_block_size = log_blksize;
1610
1611 phys_blksize = log_blksize;
1612 hfsmp->hfs_log_per_phys = 1;
1613 }
1614
1615 disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) *
1616 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1617
1618 hfsmp->hfs_logical_block_count = disksize / log_blksize;
1619
1620 hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1621
1622 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1623 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1624 phys_blksize, cred, &bp);
1625 if (retval) {
1626 if (HFS_MOUNT_DEBUG) {
1627 printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval);
1628 }
1629 goto error_exit;
1630 }
1631 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512);
1632 buf_brelse(bp);
1633 bp = NULL;
1634 vhp = (HFSPlusVolumeHeader*) mdbp;
1635
1636 }
1637 else { /* pure HFS+ */
1638 embeddedOffset = 0;
1639 vhp = (HFSPlusVolumeHeader*) mdbp;
1640 }
1641
1642 if (isroot) {
1643 hfs_root_unmounted_cleanly = ((SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) != 0);
1644 }
1645
1646 /*
1647 * On inconsistent disks, do not allow read-write mount
1648 * unless it is the boot volume being mounted. We also
1649 * always want to replay the journal if the journal_replay_only
1650 * flag is set because that will (most likely) get the
1651 * disk into a consistent state before fsck_hfs starts
1652 * looking at it.
1653 */
1654 if ( !(vfs_flags(mp) & MNT_ROOTFS)
1655 && (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask)
1656 && !journal_replay_only
1657 && !(hfsmp->hfs_flags & HFS_READ_ONLY)) {
1658
1659 if (HFS_MOUNT_DEBUG) {
1660 printf("hfs_mountfs: failed to mount non-root inconsistent disk\n");
1661 }
1662 retval = EINVAL;
1663 goto error_exit;
1664 }
1665
1666
1667 // XXXdbg
1668 //
1669 hfsmp->jnl = NULL;
1670 hfsmp->jvp = NULL;
1671 if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) &&
1672 args->journal_disable) {
1673 jnl_disable = 1;
1674 }
1675
1676 //
1677 // We only initialize the journal here if the last person
1678 // to mount this volume was journaling aware. Otherwise
1679 // we delay journal initialization until later at the end
1680 // of hfs_MountHFSPlusVolume() because the last person who
1681 // mounted it could have messed things up behind our back
1682 // (so we need to go find the .journal file, make sure it's
1683 // the right size, re-sync up if it was moved, etc).
1684 //
1685 if ( (SWAP_BE32(vhp->lastMountedVersion) == kHFSJMountVersion)
1686 && (SWAP_BE32(vhp->attributes) & kHFSVolumeJournaledMask)
1687 && !jnl_disable) {
1688
1689 // if we're able to init the journal, mark the mount
1690 // point as journaled.
1691 //
1692 if ((retval = hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred)) == 0) {
1693 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1694 } else {
1695 if (retval == EROFS) {
1696 // EROFS is a special error code that means the volume has an external
1697 // journal which we couldn't find. in that case we do not want to
1698 // rewrite the volume header - we'll just refuse to mount the volume.
1699 if (HFS_MOUNT_DEBUG) {
1700 printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n");
1701 }
1702 retval = EINVAL;
1703 goto error_exit;
1704 }
1705
1706 // if the journal failed to open, then set the lastMountedVersion
1707 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1708 // of just bailing out because the volume is journaled.
1709 if (!ronly) {
1710 if (HFS_MOUNT_DEBUG) {
1711 printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n");
1712 }
1713
1714 HFSPlusVolumeHeader *jvhp;
1715
1716 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1717
1718 if (mdb_offset == 0) {
1719 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1720 }
1721
1722 bp = NULL;
1723 retval = (int)buf_meta_bread(devvp,
1724 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1725 phys_blksize, cred, &bp);
1726 if (retval == 0) {
1727 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1728
1729 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1730 printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n");
1731 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1732 buf_bwrite(bp);
1733 } else {
1734 buf_brelse(bp);
1735 }
1736 bp = NULL;
1737 } else if (bp) {
1738 buf_brelse(bp);
1739 // clear this so the error exit path won't try to use it
1740 bp = NULL;
1741 }
1742 }
1743
1744 // if this isn't the root device just bail out.
1745 // If it is the root device we just continue on
1746 // in the hopes that fsck_hfs will be able to
1747 // fix any damage that exists on the volume.
1748 if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1749 if (HFS_MOUNT_DEBUG) {
1750 printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n");
1751 }
1752 retval = EINVAL;
1753 goto error_exit;
1754 }
1755 }
1756 }
1757 // XXXdbg
1758
1759 /* Either the journal is replayed successfully, or there
1760 * was nothing to replay, or no journal exists. In any case,
1761 * return success.
1762 */
1763 if (journal_replay_only) {
1764 retval = 0;
1765 goto error_exit;
1766 }
1767
1768 (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
1769
1770 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1771 /*
1772 * If the backend didn't like our physical blocksize
1773 * then retry with physical blocksize of 512.
1774 */
1775 if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) {
1776 printf("hfs_mountfs: could not use physical block size "
1777 "(%d) switching to 512\n", log_blksize);
1778 log_blksize = 512;
1779 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1780 if (HFS_MOUNT_DEBUG) {
1781 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n");
1782 }
1783 retval = ENXIO;
1784 goto error_exit;
1785 }
1786 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1787 if (HFS_MOUNT_DEBUG) {
1788 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n");
1789 }
1790 retval = ENXIO;
1791 goto error_exit;
1792 }
1793 devvp->v_specsize = log_blksize;
1794 /* Note: relative block count adjustment (in case this is an embedded volume). */
1795 hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize;
1796 hfsmp->hfs_logical_block_size = log_blksize;
1797 hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize;
1798
1799 hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1800
1801 if (hfsmp->jnl && hfsmp->jvp == devvp) {
1802 // close and re-open this with the new block size
1803 journal_close(hfsmp->jnl);
1804 hfsmp->jnl = NULL;
1805 if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1806 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1807 } else {
1808 // if the journal failed to open, then set the lastMountedVersion
1809 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1810 // of just bailing out because the volume is journaled.
1811 if (!ronly) {
1812 if (HFS_MOUNT_DEBUG) {
1813 printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n");
1814 }
1815 HFSPlusVolumeHeader *jvhp;
1816
1817 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1818
1819 if (mdb_offset == 0) {
1820 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1821 }
1822
1823 bp = NULL;
1824 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1825 phys_blksize, cred, &bp);
1826 if (retval == 0) {
1827 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1828
1829 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1830 printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n");
1831 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1832 buf_bwrite(bp);
1833 } else {
1834 buf_brelse(bp);
1835 }
1836 bp = NULL;
1837 } else if (bp) {
1838 buf_brelse(bp);
1839 // clear this so the error exit path won't try to use it
1840 bp = NULL;
1841 }
1842 }
1843
1844 // if this isn't the root device just bail out.
1845 // If it is the root device we just continue on
1846 // in the hopes that fsck_hfs will be able to
1847 // fix any damage that exists on the volume.
1848 if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1849 if (HFS_MOUNT_DEBUG) {
1850 printf("hfs_mountfs: hfs_early_journal_init (2) failed \n");
1851 }
1852 retval = EINVAL;
1853 goto error_exit;
1854 }
1855 }
1856 }
1857
1858 /* Try again with a smaller block size... */
1859 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1860 if (retval && HFS_MOUNT_DEBUG) {
1861 printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval);
1862 }
1863 }
1864 if (retval)
1865 (void) hfs_relconverter(0);
1866 }
1867
1868 // save off a snapshot of the mtime from the previous mount
1869 // (for matador).
1870 hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime;
1871
1872 if ( retval ) {
1873 if (HFS_MOUNT_DEBUG) {
1874 printf("hfs_mountfs: encountered failure %d \n", retval);
1875 }
1876 goto error_exit;
1877 }
1878
1879 mp->mnt_vfsstat.f_fsid.val[0] = dev;
1880 mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp);
1881 vfs_setmaxsymlen(mp, 0);
1882
1883 mp->mnt_vtable->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1884 #if NAMEDSTREAMS
1885 mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1886 #endif
1887 if ((hfsmp->hfs_flags & HFS_STANDARD) == 0 ) {
1888 /* Tell VFS that we support directory hard links. */
1889 mp->mnt_vtable->vfc_vfsflags |= VFC_VFSDIRLINKS;
1890 }
1891 #if CONFIG_HFS_STD
1892 else {
1893 /* HFS standard doesn't support extended readdir! */
1894 mount_set_noreaddirext (mp);
1895 }
1896 #endif
1897
1898 if (args) {
1899 /*
1900 * Set the free space warning levels for a non-root volume:
1901 *
1902 * Set the "danger" limit to 1% of the volume size or 100MB, whichever
1903 * is less. Set the "warning" limit to 2% of the volume size or 150MB,
1904 * whichever is less. And last, set the "desired" freespace level to
1905 * to 3% of the volume size or 200MB, whichever is less.
1906 */
1907 hfsmp->hfs_freespace_notify_dangerlimit =
1908 MIN(HFS_VERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1909 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_VERYLOWDISKTRIGGERFRACTION);
1910 hfsmp->hfs_freespace_notify_warninglimit =
1911 MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1912 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION);
1913 hfsmp->hfs_freespace_notify_desiredlevel =
1914 MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1915 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION);
1916 } else {
1917 /*
1918 * Set the free space warning levels for the root volume:
1919 *
1920 * Set the "danger" limit to 5% of the volume size or 512MB, whichever
1921 * is less. Set the "warning" limit to 10% of the volume size or 1GB,
1922 * whichever is less. And last, set the "desired" freespace level to
1923 * to 11% of the volume size or 1.25GB, whichever is less.
1924 */
1925 hfsmp->hfs_freespace_notify_dangerlimit =
1926 MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1927 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION);
1928 hfsmp->hfs_freespace_notify_warninglimit =
1929 MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1930 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION);
1931 hfsmp->hfs_freespace_notify_desiredlevel =
1932 MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1933 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION);
1934 };
1935
1936 /* Check if the file system exists on virtual device, like disk image */
1937 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) {
1938 if (isvirtual) {
1939 hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE;
1940 }
1941 }
1942
1943 /* do not allow ejectability checks on the root device */
1944 if (isroot == 0) {
1945 if ((hfsmp->hfs_flags & HFS_VIRTUAL_DEVICE) == 0 &&
1946 IOBSDIsMediaEjectable(mp->mnt_vfsstat.f_mntfromname)) {
1947 hfsmp->hfs_syncer = thread_call_allocate(hfs_syncer, hfsmp);
1948 if (hfsmp->hfs_syncer == NULL) {
1949 printf("hfs: failed to allocate syncer thread callback for %s (%s)\n",
1950 mp->mnt_vfsstat.f_mntfromname, mp->mnt_vfsstat.f_mntonname);
1951 }
1952 }
1953 }
1954
1955 printf("hfs: mounted %s on device %s\n", (hfsmp->vcbVN ? (const char*) hfsmp->vcbVN : "unknown"),
1956 (devvp->v_name ? devvp->v_name : (isroot ? "root_device": "unknown device")));
1957
1958 /*
1959 * Start looking for free space to drop below this level and generate a
1960 * warning immediately if needed:
1961 */
1962 hfsmp->hfs_notification_conditions = 0;
1963 hfs_generate_volume_notifications(hfsmp);
1964
1965 if (ronly == 0) {
1966 (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
1967 }
1968 FREE(mdbp, M_TEMP);
1969 return (0);
1970
1971 error_exit:
1972 if (bp)
1973 buf_brelse(bp);
1974 if (mdbp)
1975 FREE(mdbp, M_TEMP);
1976
1977 if (hfsmp && hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
1978 vnode_clearmountedon(hfsmp->jvp);
1979 (void)VNOP_CLOSE(hfsmp->jvp, ronly ? FREAD : FREAD|FWRITE, vfs_context_kernel());
1980 hfsmp->jvp = NULL;
1981 }
1982 if (hfsmp) {
1983 if (hfsmp->hfs_devvp) {
1984 vnode_rele(hfsmp->hfs_devvp);
1985 }
1986 hfs_locks_destroy(hfsmp);
1987 hfs_delete_chash(hfsmp);
1988 hfs_idhash_destroy (hfsmp);
1989
1990 FREE(hfsmp, M_HFSMNT);
1991 vfs_setfsprivate(mp, NULL);
1992 }
1993 return (retval);
1994 }
1995
1996
1997 /*
1998 * Make a filesystem operational.
1999 * Nothing to do at the moment.
2000 */
2001 /* ARGSUSED */
2002 static int
2003 hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context)
2004 {
2005 return (0);
2006 }
2007
2008
2009 /*
2010 * unmount system call
2011 */
int
hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp = VFSTOHFS(mp);
	int retval = E_NONE;
	int flags;
	int force;
	int started_tr = 0;	/* non-zero once hfs_start_transaction succeeds */

	flags = 0;
	force = 0;
	if (mntflags & MNT_FORCE) {
		/* Forced unmount: close files forcibly and keep going past errors. */
		flags |= FORCECLOSE;
		force = 1;
	}

	printf("hfs: unmount initiated on %s on device %s\n",
			(hfsmp->vcbVN ? (const char*) hfsmp->vcbVN : "unknown"),
			(hfsmp->hfs_devvp ? ((hfsmp->hfs_devvp->v_name ? hfsmp->hfs_devvp->v_name : "unknown device")) : "unknown device"));

	/* Push out all open files first; a failure aborts a non-forced unmount. */
	if ((retval = hfs_flushfiles(mp, flags, p)) && !force)
		return (retval);

	if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
		(void) hfs_recording_suspend(hfsmp);

	/* Stop and release the periodic syncer before tearing anything down. */
	hfs_syncer_free(hfsmp);

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		if (hfsmp->hfs_summary_table) {
			int err = 0;
			/*
			 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
			 */
			if (hfsmp->hfs_allocation_vp) {
				err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			}
			FREE (hfsmp->hfs_summary_table, M_TEMP);
			hfsmp->hfs_summary_table = NULL;
			hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;

			/* Only drop the bitmap lock if we actually acquired it above. */
			if (err == 0 && hfsmp->hfs_allocation_vp){
				hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
			}

		}
	}

	/*
	 * Flush out the b-trees, volume bitmap and Volume Header
	 */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
		retval = hfs_start_transaction(hfsmp);
		if (retval == 0) {
			started_tr = 1;
		} else if (!force) {
			goto err_exit;
		}

		/* Sync each system file that exists: startup, attributes,
		 * catalog, extents, allocation, then the hot-file tracking file.
		 */
		if (hfsmp->hfs_startup_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_startup_vp));
			if (retval && !force)
				goto err_exit;
		}

		if (hfsmp->hfs_attribute_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_attribute_vp));
			if (retval && !force)
				goto err_exit;
		}

		(void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
		if (retval && !force)
			goto err_exit;

		(void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
		if (retval && !force)
			goto err_exit;

		if (hfsmp->hfs_allocation_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
			if (retval && !force)
				goto err_exit;
		}

		if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) {
			retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p);
			if (retval && !force)
				goto err_exit;
		}

		/* If runtime corruption was detected, indicate that the volume
		 * was not unmounted cleanly.
		 */
		if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
		} else {
			HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask;
		}

		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
			int i;
			u_int32_t min_start = hfsmp->totalBlocks;

			// set the nextAllocation pointer to the smallest free block number
			// we've seen so on the next mount we won't rescan unnecessarily
			lck_spin_lock(&hfsmp->vcbFreeExtLock);
			for(i=0; i < (int)hfsmp->vcbFreeExtCnt; i++) {
				if (hfsmp->vcbFreeExt[i].startBlock < min_start) {
					min_start = hfsmp->vcbFreeExt[i].startBlock;
				}
			}
			lck_spin_unlock(&hfsmp->vcbFreeExtLock);
			if (min_start < hfsmp->nextAllocation) {
				hfsmp->nextAllocation = min_start;
			}
		}

		retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
		if (retval) {
			/* The flush failed, so don't claim a clean unmount on disk. */
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
			if (!force)
				goto err_exit;	/* could not flush everything */
		}

		if (started_tr) {
			hfs_end_transaction(hfsmp);
			started_tr = 0;
		}
	}

	if (hfsmp->jnl) {
		hfs_journal_flush(hfsmp, FALSE);
	}

	/*
	 * Invalidate our caches and release metadata vnodes
	 */
	(void) hfsUnmount(hfsmp, p);

#if CONFIG_HFS_STD
	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
		(void) hfs_relconverter(hfsmp->hfs_encoding);
	}
#endif

	// XXXdbg
	if (hfsmp->jnl) {
		journal_close(hfsmp->jnl);
		hfsmp->jnl = NULL;
	}

	VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);

	/* Close and drop the external journal device, if one was in use. */
	if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
		vnode_clearmountedon(hfsmp->jvp);
		retval = VNOP_CLOSE(hfsmp->jvp,
		                    hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE,
		                    vfs_context_kernel());
		vnode_put(hfsmp->jvp);
		hfsmp->jvp = NULL;
	}
	// XXXdbg

	/*
	 * Last chance to dump unreferenced system files.
	 */
	(void) vflush(mp, NULLVP, FORCECLOSE);

#if HFS_SPARSE_DEV
	/* Drop our reference on the backing fs (if any). */
	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) {
		struct vnode * tmpvp;

		hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
		tmpvp = hfsmp->hfs_backingfs_rootvp;
		hfsmp->hfs_backingfs_rootvp = NULLVP;
		vnode_rele(tmpvp);
	}
#endif /* HFS_SPARSE_DEV */

	vnode_rele(hfsmp->hfs_devvp);

	/* Final teardown: locks, cnode hash, id hash, then the mount itself. */
	hfs_locks_destroy(hfsmp);
	hfs_delete_chash(hfsmp);
	hfs_idhash_destroy(hfsmp);
	FREE(hfsmp, M_HFSMNT);

	return (0);

err_exit:
	if (started_tr) {
		hfs_end_transaction(hfsmp);
	}
	return retval;
}
2219
2220
2221 /*
2222 * Return the root of a filesystem.
2223 */
2224 static int
2225 hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context)
2226 {
2227 return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0);
2228 }
2229
2230
2231 /*
2232 * Do operations associated with quotas
2233 */
2234 #if !QUOTA
2235 static int
2236 hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context)
2237 {
2238 return (ENOTSUP);
2239 }
2240 #else
static int
hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context)
{
	struct proc *p = vfs_context_proc(context);
	int cmd, type, error;

	/* A uid of ~0 means "the calling user". */
	if (uid == ~0U)
		uid = kauth_cred_getuid(vfs_context_ucred(context));
	cmd = cmds >> SUBCMDSHIFT;

	/*
	 * Permission check: SYNC and QUOTASTAT are open to everyone;
	 * GETQUOTA on your own uid is allowed; everything else
	 * requires superuser.
	 */
	switch (cmd) {
	case Q_SYNC:
	case Q_QUOTASTAT:
		break;
	case Q_GETQUOTA:
		if (uid == kauth_cred_getuid(vfs_context_ucred(context)))
			break;
		/* fall through */
	default:
		if ( (error = vfs_context_suser(context)) )
			return (error);
	}

	/* The low bits select the quota type (user/group); validate range. */
	type = cmds & SUBCMDMASK;
	if ((u_int)type >= MAXQUOTAS)
		return (EINVAL);
	/* If the mount is already busy, silently do nothing. */
	if (vfs_busy(mp, LK_NOWAIT))
		return (0);

	/* Dispatch the actual quota operation. */
	switch (cmd) {

	case Q_QUOTAON:
		error = hfs_quotaon(p, mp, type, datap);
		break;

	case Q_QUOTAOFF:
		error = hfs_quotaoff(p, mp, type);
		break;

	case Q_SETQUOTA:
		error = hfs_setquota(mp, uid, type, datap);
		break;

	case Q_SETUSE:
		error = hfs_setuse(mp, uid, type, datap);
		break;

	case Q_GETQUOTA:
		error = hfs_getquota(mp, uid, type, datap);
		break;

	case Q_SYNC:
		error = hfs_qsync(mp);
		break;

	case Q_QUOTASTAT:
		error = hfs_quotastat(mp, type, datap);
		break;

	default:
		error = EINVAL;
		break;
	}
	vfs_unbusy(mp);

	return (error);
}
2308 #endif /* QUOTA */
2309
2310 /* Subtype is composite of bits */
2311 #define HFS_SUBTYPE_JOURNALED 0x01
2312 #define HFS_SUBTYPE_CASESENSITIVE 0x02
2313 /* bits 2 - 6 reserved */
2314 #define HFS_SUBTYPE_STANDARDHFS 0x80
2315
2316 /*
2317 * Get file system statistics.
2318 */
2319 int
2320 hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context)
2321 {
2322 ExtendedVCB *vcb = VFSTOVCB(mp);
2323 struct hfsmount *hfsmp = VFSTOHFS(mp);
2324 u_int32_t freeCNIDs;
2325 u_int16_t subtype = 0;
2326
2327 freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)vcb->vcbNxtCNID;
2328
2329 sbp->f_bsize = (u_int32_t)vcb->blockSize;
2330 sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
2331 sbp->f_blocks = (u_int64_t)((u_int32_t)vcb->totalBlocks);
2332 sbp->f_bfree = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 0));
2333 sbp->f_bavail = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 1));
2334 sbp->f_files = (u_int64_t)((u_int32_t )(vcb->totalBlocks - 2)); /* max files is constrained by total blocks */
2335 sbp->f_ffree = (u_int64_t)((u_int32_t )(MIN(freeCNIDs, sbp->f_bavail)));
2336
2337 /*
2338 * Subtypes (flavors) for HFS
2339 * 0: Mac OS Extended
2340 * 1: Mac OS Extended (Journaled)
2341 * 2: Mac OS Extended (Case Sensitive)
2342 * 3: Mac OS Extended (Case Sensitive, Journaled)
2343 * 4 - 127: Reserved
2344 * 128: Mac OS Standard
2345 *
2346 */
2347 if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
2348 /* HFS+ & variants */
2349 if (hfsmp->jnl) {
2350 subtype |= HFS_SUBTYPE_JOURNALED;
2351 }
2352 if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
2353 subtype |= HFS_SUBTYPE_CASESENSITIVE;
2354 }
2355 }
2356 #if CONFIG_HFS_STD
2357 else {
2358 /* HFS standard */
2359 subtype = HFS_SUBTYPE_STANDARDHFS;
2360 }
2361 #endif
2362 sbp->f_fssubtype = subtype;
2363
2364 return (0);
2365 }
2366
2367
2368 //
2369 // XXXdbg -- this is a callback to be used by the journal to
2370 // get meta data blocks flushed out to disk.
2371 //
2372 // XXXdbg -- be smarter and don't flush *every* block on each
2373 // call. try to only flush some so we don't wind up
2374 // being too synchronous.
2375 //
__private_extern__
void
hfs_sync_metadata(void *arg)
{
	struct mount *mp = (struct mount *)arg;
	struct hfsmount *hfsmp;
	ExtendedVCB *vcb;
	buf_t bp;
	int retval;
	daddr64_t priIDSector;
	hfsmp = VFSTOHFS(mp);
	vcb = HFSTOVCB(hfsmp);

	// now make sure the super block is flushed
	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
				  HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));

	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
			hfsmp->hfs_physical_block_size, NOCRED, &bp);
	/* ENXIO is tolerated here (device may have gone away); only log others. */
	if ((retval != 0 ) && (retval != ENXIO)) {
		printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
		       (int)priIDSector, retval);
	}

	/* Write the buffer only if it is dirty (delayed-write) and not locked.
	 * NOTE(review): the error paths assume buf_meta_bread always stores into
	 * bp even on failure — otherwise bp would be read uninitialized here;
	 * confirm against the buffer-cache KPI.
	 */
	if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
		buf_bwrite(bp);
	} else if (bp) {
		buf_brelse(bp);
	}

	/* Note that these I/Os bypass the journal (no calls to journal_start_modify_block) */

	// the alternate super block...
	// XXXdbg - we probably don't need to do this each and every time.
	//          hfs_btreeio.c:FlushAlternate() should flag when it was
	//          written...
	if (hfsmp->hfs_partition_avh_sector) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
				hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
			/*
			 * note this I/O can fail if the partition shrank behind our backs!
			 * So failure should be OK here.
			 */
			buf_bwrite(bp);
		} else if (bp) {
			buf_brelse(bp);
		}
	}

	/* Is the FS's idea of the AVH different than the partition ? */
	if ((hfsmp->hfs_fs_avh_sector) && (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
				hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
			buf_bwrite(bp);
		} else if (bp) {
			buf_brelse(bp);
		}
	}

}
2441
2442
/* Arguments threaded through vnode_iterate() to hfs_sync_callback(). */
struct hfs_sync_cargs {
	kauth_cred_t cred;	/* caller's credential (captured in hfs_sync; presumably for fsync auth — not read in the visible callback) */
	struct proc *p;		/* process context passed through to hfs_fsync */
	int waitfor;		/* sync mode flag (e.g. MNT_WAIT) passed to hfs_fsync */
	int error;		/* last hfs_fsync error seen by the callback; 0 if none */
};
2449
2450
2451 static int
2452 hfs_sync_callback(struct vnode *vp, void *cargs)
2453 {
2454 struct cnode *cp;
2455 struct hfs_sync_cargs *args;
2456 int error;
2457
2458 args = (struct hfs_sync_cargs *)cargs;
2459
2460 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
2461 return (VNODE_RETURNED);
2462 }
2463 cp = VTOC(vp);
2464
2465 if ((cp->c_flag & C_MODIFIED) ||
2466 (cp->c_touch_acctime | cp->c_touch_chgtime | cp->c_touch_modtime) ||
2467 vnode_hasdirtyblks(vp)) {
2468 error = hfs_fsync(vp, args->waitfor, 0, args->p);
2469
2470 if (error)
2471 args->error = error;
2472 }
2473 hfs_unlock(cp);
2474 return (VNODE_RETURNED);
2475 }
2476
2477
2478
2479 /*
2480 * Go through the disk queues to initiate sandbagged IO;
2481 * go through the inodes to write those that have been modified;
2482 * initiate the writing of the super block if it has been modified.
2483 *
2484 * Note: we are always called with the filesystem marked `MPBUSY'.
2485 */
2486 int
2487 hfs_sync(struct mount *mp, int waitfor, vfs_context_t context)
2488 {
2489 struct proc *p = vfs_context_proc(context);
2490 struct cnode *cp;
2491 struct hfsmount *hfsmp;
2492 ExtendedVCB *vcb;
2493 struct vnode *meta_vp[4];
2494 int i;
2495 int error, allerror = 0;
2496 struct hfs_sync_cargs args;
2497
2498 hfsmp = VFSTOHFS(mp);
2499
2500 // Back off if hfs_changefs or a freeze is underway
2501 hfs_lock_mount(hfsmp);
2502 if ((hfsmp->hfs_flags & HFS_IN_CHANGEFS)
2503 || hfsmp->hfs_freeze_state != HFS_THAWED) {
2504 hfs_unlock_mount(hfsmp);
2505 return 0;
2506 }
2507
2508 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2509 hfs_unlock_mount(hfsmp);
2510 return (EROFS);
2511 }
2512
2513 ++hfsmp->hfs_syncers;
2514 hfs_unlock_mount(hfsmp);
2515
2516 args.cred = kauth_cred_get();
2517 args.waitfor = waitfor;
2518 args.p = p;
2519 args.error = 0;
2520 /*
2521 * hfs_sync_callback will be called for each vnode
2522 * hung off of this mount point... the vnode will be
2523 * properly referenced and unreferenced around the callback
2524 */
2525 vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args);
2526
2527 if (args.error)
2528 allerror = args.error;
2529
2530 vcb = HFSTOVCB(hfsmp);
2531
2532 meta_vp[0] = vcb->extentsRefNum;
2533 meta_vp[1] = vcb->catalogRefNum;
2534 meta_vp[2] = vcb->allocationsRefNum; /* This is NULL for standard HFS */
2535 meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */
2536
2537 /* Now sync our three metadata files */
2538 for (i = 0; i < 4; ++i) {
2539 struct vnode *btvp;
2540
2541 btvp = meta_vp[i];;
2542 if ((btvp==0) || (vnode_mount(btvp) != mp))
2543 continue;
2544
2545 /* XXX use hfs_systemfile_lock instead ? */
2546 (void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2547 cp = VTOC(btvp);
2548
2549 if (((cp->c_flag & C_MODIFIED) == 0) &&
2550 (cp->c_touch_acctime == 0) &&
2551 (cp->c_touch_chgtime == 0) &&
2552 (cp->c_touch_modtime == 0) &&
2553 vnode_hasdirtyblks(btvp) == 0) {
2554 hfs_unlock(VTOC(btvp));
2555 continue;
2556 }
2557 error = vnode_get(btvp);
2558 if (error) {
2559 hfs_unlock(VTOC(btvp));
2560 continue;
2561 }
2562 if ((error = hfs_fsync(btvp, waitfor, 0, p)))
2563 allerror = error;
2564
2565 hfs_unlock(cp);
2566 vnode_put(btvp);
2567 };
2568
2569
2570 #if CONFIG_HFS_STD
2571 /*
2572 * Force stale file system control information to be flushed.
2573 */
2574 if (vcb->vcbSigWord == kHFSSigWord) {
2575 if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) {
2576 allerror = error;
2577 }
2578 }
2579 #endif
2580
2581 #if QUOTA
2582 hfs_qsync(mp);
2583 #endif /* QUOTA */
2584
2585 hfs_hotfilesync(hfsmp, vfs_context_kernel());
2586
2587 /*
2588 * Write back modified superblock.
2589 */
2590 if (IsVCBDirty(vcb)) {
2591 error = hfs_flushvolumeheader(hfsmp, waitfor, 0);
2592 if (error)
2593 allerror = error;
2594 }
2595
2596 if (hfsmp->jnl) {
2597 hfs_journal_flush(hfsmp, FALSE);
2598 }
2599
2600 hfs_lock_mount(hfsmp);
2601 boolean_t wake = (!--hfsmp->hfs_syncers
2602 && hfsmp->hfs_freeze_state == HFS_WANT_TO_FREEZE);
2603 hfs_unlock_mount(hfsmp);
2604 if (wake)
2605 wakeup(&hfsmp->hfs_freeze_state);
2606
2607 return (allerror);
2608 }
2609
2610
2611 /*
2612 * File handle to vnode
2613 *
2614 * Have to be really careful about stale file handles:
2615 * - check that the cnode id is valid
2616 * - call hfs_vget() to get the locked cnode
2617 * - check for an unallocated cnode (i_mode == 0)
2618 * - check that the given client host has export rights and return
2619 * those rights via. exflagsp and credanonp
2620 */
2621 static int
2622 hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context)
2623 {
2624 struct hfsfid *hfsfhp;
2625 struct vnode *nvp;
2626 int result;
2627
2628 *vpp = NULL;
2629 hfsfhp = (struct hfsfid *)fhp;
2630
2631 if (fhlen < (int)sizeof(struct hfsfid))
2632 return (EINVAL);
2633
2634 result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0, 0);
2635 if (result) {
2636 if (result == ENOENT)
2637 result = ESTALE;
2638 return result;
2639 }
2640
2641 /*
2642 * We used to use the create time as the gen id of the file handle,
2643 * but it is not static enough because it can change at any point
2644 * via system calls. We still don't have another volume ID or other
2645 * unique identifier to use for a generation ID across reboots that
2646 * persists until the file is removed. Using only the CNID exposes
2647 * us to the potential wrap-around case, but as of 2/2008, it would take
2648 * over 2 months to wrap around if the machine did nothing but allocate
2649 * CNIDs. Using some kind of wrap counter would only be effective if
2650 * each file had the wrap counter associated with it. For now,
2651 * we use only the CNID to identify the file as it's good enough.
2652 */
2653
2654 *vpp = nvp;
2655
2656 hfs_unlock(VTOC(nvp));
2657 return (0);
2658 }
2659
2660
2661 /*
2662 * Vnode pointer to File handle
2663 */
2664 /* ARGSUSED */
2665 static int
2666 hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context)
2667 {
2668 struct cnode *cp;
2669 struct hfsfid *hfsfhp;
2670
2671 if (ISHFS(VTOVCB(vp)))
2672 return (ENOTSUP); /* hfs standard is not exportable */
2673
2674 if (*fhlenp < (int)sizeof(struct hfsfid))
2675 return (EOVERFLOW);
2676
2677 cp = VTOC(vp);
2678 hfsfhp = (struct hfsfid *)fhp;
2679 /* only the CNID is used to identify the file now */
2680 hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
2681 hfsfhp->hfsfid_gen = htonl(cp->c_fileid);
2682 *fhlenp = sizeof(struct hfsfid);
2683
2684 return (0);
2685 }
2686
2687
2688 /*
2689 * Initialize HFS filesystems, done only once per boot.
2690 *
2691 * HFS is not a kext-based file system. This makes it difficult to find
2692 * out when the last HFS file system was unmounted and call hfs_uninit()
2693 * to deallocate data structures allocated in hfs_init(). Therefore we
2694 * never deallocate memory allocated by lock attribute and group initializations
2695 * in this function.
2696 */
2697 static int
2698 hfs_init(__unused struct vfsconf *vfsp)
2699 {
2700 static int done = 0;
2701
2702 if (done)
2703 return (0);
2704 done = 1;
2705 hfs_chashinit();
2706 hfs_converterinit();
2707
2708 BTReserveSetup();
2709
2710 hfs_lock_attr = lck_attr_alloc_init();
2711 hfs_group_attr = lck_grp_attr_alloc_init();
2712 hfs_mutex_group = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
2713 hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
2714 hfs_spinlock_group = lck_grp_alloc_init("hfs-spinlock", hfs_group_attr);
2715
2716 #if HFS_COMPRESSION
2717 decmpfs_init();
2718 #endif
2719
2720 return (0);
2721 }
2722
2723
2724 /*
2725 * Destroy all locks, mutexes and spinlocks in hfsmp on unmount or failed mount
2726 */
2727 static void
2728 hfs_locks_destroy(struct hfsmount *hfsmp)
2729 {
2730
2731 lck_mtx_destroy(&hfsmp->hfs_mutex, hfs_mutex_group);
2732 lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group);
2733 lck_rw_destroy(&hfsmp->hfs_global_lock, hfs_rwlock_group);
2734 lck_spin_destroy(&hfsmp->vcbFreeExtLock, hfs_spinlock_group);
2735
2736 return;
2737 }
2738
2739
2740 static int
2741 hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
2742 {
2743 struct hfsmount * hfsmp;
2744 char fstypename[MFSNAMELEN];
2745
2746 if (vp == NULL)
2747 return (EINVAL);
2748
2749 if (!vnode_isvroot(vp))
2750 return (EINVAL);
2751
2752 vnode_vfsname(vp, fstypename);
2753 if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0)
2754 return (EINVAL);
2755
2756 hfsmp = VTOHFS(vp);
2757
2758 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
2759 return (EINVAL);
2760
2761 *hfsmpp = hfsmp;
2762
2763 return (0);
2764 }
2765
2766 // XXXdbg
2767 #include <sys/filedesc.h>
2768
2769 /*
2770 * HFS filesystem related variables.
2771 */
2772 int
2773 hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
2774 user_addr_t newp, size_t newlen, vfs_context_t context)
2775 {
2776 struct proc *p = vfs_context_proc(context);
2777 int error;
2778 struct hfsmount *hfsmp;
2779
2780 /* all sysctl names at this level are terminal */
2781
2782 if (name[0] == HFS_ENCODINGBIAS) {
2783 int bias;
2784
2785 bias = hfs_getencodingbias();
2786 error = sysctl_int(oldp, oldlenp, newp, newlen, &bias);
2787 if (error == 0 && newp)
2788 hfs_setencodingbias(bias);
2789 return (error);
2790
2791 } else if (name[0] == HFS_EXTEND_FS) {
2792 u_int64_t newsize = 0;
2793 vnode_t vp = vfs_context_cwd(context);
2794
2795 if (newp == USER_ADDR_NULL || vp == NULLVP)
2796 return (EINVAL);
2797 if ((error = hfs_getmountpoint(vp, &hfsmp)))
2798 return (error);
2799
2800 /* Start with the 'size' set to the current number of bytes in the filesystem */
2801 newsize = ((uint64_t)hfsmp->totalBlocks) * ((uint64_t)hfsmp->blockSize);
2802
2803 /* now get the new size from userland and over-write our stored value */
2804 error = sysctl_quad(oldp, oldlenp, newp, newlen, (quad_t *)&newsize);
2805 if (error)
2806 return (error);
2807
2808 error = hfs_extendfs(hfsmp, newsize, context);
2809 return (error);
2810
2811 } else if (name[0] == HFS_ENCODINGHINT) {
2812 size_t bufsize;
2813 size_t bytes;
2814 u_int32_t hint;
2815 u_int16_t *unicode_name = NULL;
2816 char *filename = NULL;
2817
2818 if ((newlen <= 0) || (newlen > MAXPATHLEN))
2819 return (EINVAL);
2820
2821 bufsize = MAX(newlen * 3, MAXPATHLEN);
2822 MALLOC(filename, char *, newlen, M_TEMP, M_WAITOK);
2823 if (filename == NULL) {
2824 error = ENOMEM;
2825 goto encodinghint_exit;
2826 }
2827 MALLOC(unicode_name, u_int16_t *, bufsize, M_TEMP, M_WAITOK);
2828 if (unicode_name == NULL) {
2829 error = ENOMEM;
2830 goto encodinghint_exit;
2831 }
2832
2833 error = copyin(newp, (caddr_t)filename, newlen);
2834 if (error == 0) {
2835 error = utf8_decodestr((u_int8_t *)filename, newlen - 1, unicode_name,
2836 &bytes, bufsize, 0, UTF_DECOMPOSED);
2837 if (error == 0) {
2838 hint = hfs_pickencoding(unicode_name, bytes / 2);
2839 error = sysctl_int(oldp, oldlenp, USER_ADDR_NULL, 0, (int32_t *)&hint);
2840 }
2841 }
2842
2843 encodinghint_exit:
2844 if (unicode_name)
2845 FREE(unicode_name, M_TEMP);
2846 if (filename)
2847 FREE(filename, M_TEMP);
2848 return (error);
2849
2850 } else if (name[0] == HFS_ENABLE_JOURNALING) {
2851 // make the file system journaled...
2852 vnode_t vp = vfs_context_cwd(context);
2853 vnode_t jvp;
2854 ExtendedVCB *vcb;
2855 struct cat_attr jnl_attr;
2856 struct cat_attr jinfo_attr;
2857 struct cat_fork jnl_fork;
2858 struct cat_fork jinfo_fork;
2859 buf_t jib_buf;
2860 uint64_t jib_blkno;
2861 uint32_t tmpblkno;
2862 uint64_t journal_byte_offset;
2863 uint64_t journal_size;
2864 vnode_t jib_vp = NULLVP;
2865 struct JournalInfoBlock local_jib;
2866 int err = 0;
2867 void *jnl = NULL;
2868 int lockflags;
2869
2870 /* Only root can enable journaling */
2871 if (!kauth_cred_issuser(kauth_cred_get())) {
2872 return (EPERM);
2873 }
2874 if (vp == NULLVP)
2875 return EINVAL;
2876
2877 hfsmp = VTOHFS(vp);
2878 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2879 return EROFS;
2880 }
2881 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
2882 printf("hfs: can't make a plain hfs volume journaled.\n");
2883 return EINVAL;
2884 }
2885
2886 if (hfsmp->jnl) {
2887 printf("hfs: volume @ mp %p is already journaled!\n", vnode_mount(vp));
2888 return EAGAIN;
2889 }
2890 vcb = HFSTOVCB(hfsmp);
2891
2892 /* Set up local copies of the initialization info */
2893 tmpblkno = (uint32_t) name[1];
2894 jib_blkno = (uint64_t) tmpblkno;
2895 journal_byte_offset = (uint64_t) name[2];
2896 journal_byte_offset *= hfsmp->blockSize;
2897 journal_byte_offset += hfsmp->hfsPlusIOPosOffset;
2898 journal_size = (uint64_t)((unsigned)name[3]);
2899
2900 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);
2901 if (BTHasContiguousNodes(VTOF(vcb->catalogRefNum)) == 0 ||
2902 BTHasContiguousNodes(VTOF(vcb->extentsRefNum)) == 0) {
2903
2904 printf("hfs: volume has a btree w/non-contiguous nodes. can not enable journaling.\n");
2905 hfs_systemfile_unlock(hfsmp, lockflags);
2906 return EINVAL;
2907 }
2908 hfs_systemfile_unlock(hfsmp, lockflags);
2909
2910 // make sure these both exist!
2911 if ( GetFileInfo(vcb, kHFSRootFolderID, ".journal_info_block", &jinfo_attr, &jinfo_fork) == 0
2912 || GetFileInfo(vcb, kHFSRootFolderID, ".journal", &jnl_attr, &jnl_fork) == 0) {
2913
2914 return EINVAL;
2915 }
2916
2917 /*
2918 * At this point, we have a copy of the metadata that lives in the catalog for the
2919 * journal info block. Compare that the journal info block's single extent matches
2920 * that which was passed into this sysctl.
2921 *
2922 * If it is different, deny the journal enable call.
2923 */
2924 if (jinfo_fork.cf_blocks > 1) {
2925 /* too many blocks */
2926 return EINVAL;
2927 }
2928
2929 if (jinfo_fork.cf_extents[0].startBlock != jib_blkno) {
2930 /* Wrong block */
2931 return EINVAL;
2932 }
2933
2934 /*
2935 * We want to immediately purge the vnode for the JIB.
2936 *
2937 * Because it was written to from userland, there's probably
2938 * a vnode somewhere in the vnode cache (possibly with UBC backed blocks).
2939 * So we bring the vnode into core, then immediately do whatever
2940 * we can to flush/vclean it out. This is because those blocks will be
2941 * interpreted as user data, which may be treated separately on some platforms
2942 * than metadata. If the vnode is gone, then there cannot be backing blocks
2943 * in the UBC.
2944 */
2945 if (hfs_vget (hfsmp, jinfo_attr.ca_fileid, &jib_vp, 1, 0)) {
2946 return EINVAL;
2947 }
2948 /*
2949 * Now we have a vnode for the JIB. recycle it. Because we hold an iocount
2950 * on the vnode, we'll just mark it for termination when the last iocount
2951 * (hopefully ours), is dropped.
2952 */
2953 vnode_recycle (jib_vp);
2954 err = vnode_put (jib_vp);
2955 if (err) {
2956 return EINVAL;
2957 }
2958
2959 /* Initialize the local copy of the JIB (just like hfs.util) */
2960 memset (&local_jib, 'Z', sizeof(struct JournalInfoBlock));
2961 local_jib.flags = SWAP_BE32(kJIJournalInFSMask);
2962 /* Note that the JIB's offset is in bytes */
2963 local_jib.offset = SWAP_BE64(journal_byte_offset);
2964 local_jib.size = SWAP_BE64(journal_size);
2965
2966 /*
2967 * Now write out the local JIB. This essentially overwrites the userland
2968 * copy of the JIB. Read it as BLK_META to treat it as a metadata read/write.
2969 */
2970 jib_buf = buf_getblk (hfsmp->hfs_devvp,
2971 jib_blkno * (hfsmp->blockSize / hfsmp->hfs_logical_block_size),
2972 hfsmp->blockSize, 0, 0, BLK_META);
2973 char* buf_ptr = (char*) buf_dataptr (jib_buf);
2974
2975 /* Zero out the portion of the block that won't contain JIB data */
2976 memset (buf_ptr, 0, hfsmp->blockSize);
2977
2978 bcopy(&local_jib, buf_ptr, sizeof(local_jib));
2979 if (buf_bwrite (jib_buf)) {
2980 return EIO;
2981 }
2982
2983 /* Force a flush track cache */
2984 (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
2985
2986
2987 /* Now proceed with full volume sync */
2988 hfs_sync(hfsmp->hfs_mp, MNT_WAIT, context);
2989
2990 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2991 (off_t)name[2], (off_t)name[3]);
2992
2993 //
2994 // XXXdbg - note that currently (Sept, 08) hfs_util does not support
2995 // enabling the journal on a separate device so it is safe
2996 // to just copy hfs_devvp here. If hfs_util gets the ability
2997 // to dynamically enable the journal on a separate device then
2998 // we will have to do the same thing as hfs_early_journal_init()
2999 // to locate and open the journal device.
3000 //
3001 jvp = hfsmp->hfs_devvp;
3002 jnl = journal_create(jvp, journal_byte_offset, journal_size,
3003 hfsmp->hfs_devvp,
3004 hfsmp->hfs_logical_block_size,
3005 0,
3006 0,
3007 hfs_sync_metadata, hfsmp->hfs_mp,
3008 hfsmp->hfs_mp);
3009
3010 /*
3011 * Set up the trim callback function so that we can add
3012 * recently freed extents to the free extent cache once
3013 * the transaction that freed them is written to the
3014 * journal on disk.
3015 */
3016 if (jnl)
3017 journal_trim_set_callback(jnl, hfs_trim_callback, hfsmp);
3018
3019 if (jnl == NULL) {
3020 printf("hfs: FAILED to create the journal!\n");
3021 if (jvp && jvp != hfsmp->hfs_devvp) {
3022 vnode_clearmountedon(jvp);
3023 VNOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, vfs_context_kernel());
3024 }
3025 jvp = NULL;
3026
3027 return EINVAL;
3028 }
3029
3030 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3031
3032 /*
3033 * Flush all dirty metadata buffers.
3034 */
3035 buf_flushdirtyblks(hfsmp->hfs_devvp, TRUE, 0, "hfs_sysctl");
3036 buf_flushdirtyblks(hfsmp->hfs_extents_vp, TRUE, 0, "hfs_sysctl");
3037 buf_flushdirtyblks(hfsmp->hfs_catalog_vp, TRUE, 0, "hfs_sysctl");
3038 buf_flushdirtyblks(hfsmp->hfs_allocation_vp, TRUE, 0, "hfs_sysctl");
3039 if (hfsmp->hfs_attribute_vp)
3040 buf_flushdirtyblks(hfsmp->hfs_attribute_vp, TRUE, 0, "hfs_sysctl");
3041
3042 HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1];
3043 HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask;
3044 hfsmp->jvp = jvp;
3045 hfsmp->jnl = jnl;
3046
3047 // save this off for the hack-y check in hfs_remove()
3048 hfsmp->jnl_start = (u_int32_t)name[2];
3049 hfsmp->jnl_size = (off_t)((unsigned)name[3]);
3050 hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid;
3051 hfsmp->hfs_jnlfileid = jnl_attr.ca_fileid;
3052
3053 vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3054
3055 hfs_unlock_global (hfsmp);
3056 hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
3057
3058 {
3059 fsid_t fsid;
3060
3061 fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3062 fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3063 vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3064 }
3065 return 0;
3066 } else if (name[0] == HFS_DISABLE_JOURNALING) {
3067 // clear the journaling bit
3068 vnode_t vp = vfs_context_cwd(context);
3069
3070 /* Only root can disable journaling */
3071 if (!kauth_cred_issuser(kauth_cred_get())) {
3072 return (EPERM);
3073 }
3074 if (vp == NULLVP)
3075 return EINVAL;
3076
3077 hfsmp = VTOHFS(vp);
3078
3079 /*
3080 * Disabling journaling is disallowed on volumes with directory hard links
3081 * because we have not tested the relevant code path.
3082 */
3083 if (hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries != 0){
3084 printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
3085 return EPERM;
3086 }
3087
3088 printf("hfs: disabling journaling for mount @ %p\n", vnode_mount(vp));
3089
3090 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3091
3092 // Lights out for you buddy!
3093 journal_close(hfsmp->jnl);
3094 hfsmp->jnl = NULL;
3095
3096 if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
3097 vnode_clearmountedon(hfsmp->jvp);
3098 VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, vfs_context_kernel());
3099 vnode_put(hfsmp->jvp);
3100 }
3101 hfsmp->jvp = NULL;
3102 vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3103 hfsmp->jnl_start = 0;
3104 hfsmp->hfs_jnlinfoblkid = 0;
3105 hfsmp->hfs_jnlfileid = 0;
3106
3107 HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask;
3108
3109 hfs_unlock_global (hfsmp);
3110
3111 hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
3112
3113 {
3114 fsid_t fsid;
3115
3116 fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3117 fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3118 vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3119 }
3120 return 0;
3121 } else if (name[0] == HFS_GET_JOURNAL_INFO) {
3122 vnode_t vp = vfs_context_cwd(context);
3123 off_t jnl_start, jnl_size;
3124
3125 if (vp == NULLVP)
3126 return EINVAL;
3127
3128 /* 64-bit processes won't work with this sysctl -- can't fit a pointer into an int! */
3129 if (proc_is64bit(current_proc()))
3130 return EINVAL;
3131
3132 hfsmp = VTOHFS(vp);
3133 if (hfsmp->jnl == NULL) {
3134 jnl_start = 0;
3135 jnl_size = 0;
3136 } else {
3137 jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset;
3138 jnl_size = (off_t)hfsmp->jnl_size;
3139 }
3140
3141 if ((error = copyout((caddr_t)&jnl_start, CAST_USER_ADDR_T(name[1]), sizeof(off_t))) != 0) {
3142 return error;
3143 }
3144 if ((error = copyout((caddr_t)&jnl_size, CAST_USER_ADDR_T(name[2]), sizeof(off_t))) != 0) {
3145 return error;
3146 }
3147
3148 return 0;
3149 } else if (name[0] == HFS_SET_PKG_EXTENSIONS) {
3150
3151 return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
3152
3153 } else if (name[0] == VFS_CTL_QUERY) {
3154 struct sysctl_req *req;
3155 union union_vfsidctl vc;
3156 struct mount *mp;
3157 struct vfsquery vq;
3158
3159 req = CAST_DOWN(struct sysctl_req *, oldp); /* we're new style vfs sysctl. */
3160 if (req == NULL) {
3161 return EFAULT;
3162 }
3163
3164 error = SYSCTL_IN(req, &vc, proc_is64bit(p)? sizeof(vc.vc64):sizeof(vc.vc32));
3165 if (error) return (error);
3166
3167 mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
3168 if (mp == NULL) return (ENOENT);
3169
3170 hfsmp = VFSTOHFS(mp);
3171 bzero(&vq, sizeof(vq));
3172 vq.vq_flags = hfsmp->hfs_notification_conditions;
3173 return SYSCTL_OUT(req, &vq, sizeof(vq));;
3174 } else if (name[0] == HFS_REPLAY_JOURNAL) {
3175 vnode_t devvp = NULL;
3176 int device_fd;
3177 if (namelen != 2) {
3178 return (EINVAL);
3179 }
3180 device_fd = name[1];
3181 error = file_vnode(device_fd, &devvp);
3182 if (error) {
3183 return error;
3184 }
3185 error = vnode_getwithref(devvp);
3186 if (error) {
3187 file_drop(device_fd);
3188 return error;
3189 }
3190 error = hfs_journal_replay(devvp, context);
3191 file_drop(device_fd);
3192 vnode_put(devvp);
3193 return error;
3194 } else if (name[0] == HFS_ENABLE_RESIZE_DEBUG) {
3195 hfs_resize_debug = 1;
3196 printf ("hfs_sysctl: Enabled volume resize debugging.\n");
3197 return 0;
3198 }
3199
3200 return (ENOTSUP);
3201 }
3202
3203 /*
3204 * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support
3205 * the build_path ioctl. We use it to leverage the code below that updates
3206 * the origin list cache if necessary
3207 */
3208
3209 int
3210 hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
3211 {
3212 int error;
3213 int lockflags;
3214 struct hfsmount *hfsmp;
3215
3216 hfsmp = VFSTOHFS(mp);
3217
3218 error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1, 0);
3219 if (error)
3220 return (error);
3221
3222 /*
3223 * ADLs may need to have their origin state updated
3224 * since build_path needs a valid parent. The same is true
3225 * for hardlinked files as well. There isn't a race window here
3226 * in re-acquiring the cnode lock since we aren't pulling any data
3227 * out of the cnode; instead, we're going to the catalog.
3228 */
3229 if ((VTOC(*vpp)->c_flag & C_HARDLINK) &&
3230 (hfs_lock(VTOC(*vpp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0)) {
3231 cnode_t *cp = VTOC(*vpp);
3232 struct cat_desc cdesc;
3233
3234 if (!hfs_haslinkorigin(cp)) {
3235 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3236 error = cat_findname(hfsmp, (cnid_t)ino, &cdesc);
3237 hfs_systemfile_unlock(hfsmp, lockflags);
3238 if (error == 0) {
3239 if ((cdesc.cd_parentcnid != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
3240 (cdesc.cd_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid)) {
3241 hfs_savelinkorigin(cp, cdesc.cd_parentcnid);
3242 }
3243 cat_releasedesc(&cdesc);
3244 }
3245 }
3246 hfs_unlock(cp);
3247 }
3248 return (0);
3249 }
3250
3251
3252 /*
3253 * Look up an HFS object by ID.
3254 *
3255 * The object is returned with an iocount reference and the cnode locked.
3256 *
3257 * If the object is a file then it will represent the data fork.
3258 */
int
hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted)
{
	struct vnode *vp = NULLVP;
	struct cat_desc cndesc;
	struct cat_attr cnattr;
	struct cat_fork cnfork;
	u_int32_t linkref = 0;
	int error;

	/* Check for cnids that shouldn't be exported. */
	if ((cnid < kHFSFirstUserCatalogNodeID) &&
	    (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) {
		return (ENOENT);
	}
	/* Don't export our private directories. */
	if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
	    cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
		return (ENOENT);
	}
	/*
	 * Check the cnode hash first; a hit gives us the vnode directly
	 * (locked unless skiplock was requested) with no catalog I/O.
	 */
	vp = hfs_chash_getvnode(hfsmp, cnid, 0, skiplock, allow_deleted);
	if (vp) {
		*vpp = vp;
		return(0);
	}

	bzero(&cndesc, sizeof(cndesc));
	bzero(&cnattr, sizeof(cnattr));
	bzero(&cnfork, sizeof(cnfork));

	/*
	 * Not in hash, lookup in catalog
	 */
	if (cnid == kHFSRootParentID) {
		/*
		 * The root's parent has no catalog record; fabricate an
		 * in-memory descriptor/attr set for the root folder instead.
		 */
		static char hfs_rootname[] = "/";

		cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0];
		cndesc.cd_namelen = 1;
		cndesc.cd_parentcnid = kHFSRootParentID;
		cndesc.cd_cnid = kHFSRootFolderID;
		cndesc.cd_flags = CD_ISDIR;

		cnattr.ca_fileid = kHFSRootFolderID;
		cnattr.ca_linkcount = 1;
		cnattr.ca_entries = 1;
		cnattr.ca_dircount = 1;
		cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
	} else {
		int lockflags;
		cnid_t pid;
		const char *nameptr;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
		error = cat_idlookup(hfsmp, cnid, 0, 0, &cndesc, &cnattr, &cnfork);
		hfs_systemfile_unlock(hfsmp, lockflags);

		if (error) {
			*vpp = NULL;
			return (error);
		}

		/*
		 * Check for a raw hardlink inode and save its linkref.
		 * Raw inodes live in the private metadata directories and
		 * encode their link reference number in the node name
		 * (e.g. "iNode<n>" / "dir_<n>"); parse it out here.
		 */
		pid = cndesc.cd_parentcnid;
		nameptr = (const char *)cndesc.cd_nameptr;

		if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
		    (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) {
			linkref = strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10);

		} else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
		           (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) {
			linkref = strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10);

		} else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
		           (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) {
			*vpp = NULL;
			cat_releasedesc(&cndesc);
			return (ENOENT);	/* open unlinked file */
		}
	}

	/*
	 * Finish initializing cnode descriptor for hardlinks.
	 *
	 * We need a valid name and parent for reverse lookups.
	 */
	if (linkref) {
		cnid_t lastid;
		struct cat_desc linkdesc;
		int linkerr = 0;

		cnattr.ca_linkref = linkref;
		bzero (&linkdesc, sizeof (linkdesc));

		/*
		 * If the caller supplied the raw inode value, then we don't know exactly
		 * which hardlink they wanted. It's likely that they acquired the raw inode
		 * value BEFORE the item became a hardlink, in which case, they probably
		 * want the oldest link. So request the oldest link from the catalog.
		 *
		 * Unfortunately, this requires that we iterate through all N hardlinks. On the plus
		 * side, since we know that we want the last linkID, we can also have this one
		 * call give us back the name of the last ID, since it's going to have it in-hand...
		 */
		linkerr = hfs_lookup_lastlink (hfsmp, linkref, &lastid, &linkdesc);
		if ((linkerr == 0) && (lastid != 0)) {
			/*
			 * Release any lingering buffers attached to our local descriptor.
			 * Then copy the name and other business into the cndesc
			 */
			cat_releasedesc (&cndesc);
			bcopy (&linkdesc, &cndesc, sizeof(linkdesc));
		}
		/* If it failed, the linkref code will just use whatever it had in-hand below. */
	}

	if (linkref) {
		int newvnode_flags = 0;

		/* Raw hardlink inode: no parent vnode / component name needed. */
		error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr,
		                        &cnfork, &vp, &newvnode_flags);
		if (error == 0) {
			VTOC(vp)->c_flag |= C_HARDLINK;
			vnode_setmultipath(vp);
		}
	} else {
		struct componentname cn;
		int newvnode_flags = 0;

		/* Supply hfs_getnewvnode with a component name. */
		MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
		cn.cn_nameiop = LOOKUP;
		cn.cn_flags = ISLASTCN | HASBUF;
		cn.cn_context = NULL;
		cn.cn_pnlen = MAXPATHLEN;
		cn.cn_nameptr = cn.cn_pnbuf;
		cn.cn_namelen = cndesc.cd_namelen;
		cn.cn_hash = 0;
		cn.cn_consume = 0;
		/* +1 copies the terminating NUL along with the name bytes. */
		bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1);

		error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr,
		                        &cnfork, &vp, &newvnode_flags);

		if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK)) {
			hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid);
		}
		FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
	}
	cat_releasedesc(&cndesc);

	*vpp = vp;
	/* Caller asked for an unlocked cnode; drop the lock before returning. */
	if (vp && skiplock) {
		hfs_unlock(VTOC(vp));
	}
	return (error);
}
3421
3422
3423 /*
3424 * Flush out all the files in a filesystem.
3425 */
static int
#if QUOTA
hfs_flushfiles(struct mount *mp, int flags, struct proc *p)
#else
hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p)
#endif /* QUOTA */
{
	struct hfsmount *hfsmp;
	struct vnode *skipvp = NULLVP;
	int error;
	int accounted_root_usecounts;	/* known extra refs held on the root vnode */
#if QUOTA
	int i;
#endif

	hfsmp = VFSTOHFS(mp);

	accounted_root_usecounts = 0;
#if QUOTA
	/*
	 * The open quota files have an indirect reference on
	 * the root directory vnode. We must account for this
	 * extra reference when doing the initial vflush.
	 */
	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
		/* Find out how many quota files we have open. */
		for (i = 0; i < MAXQUOTAS; i++) {
			if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP)
				++accounted_root_usecounts;
		}
	}
#endif /* QUOTA */

	if (accounted_root_usecounts > 0) {
		/* Obtain the root vnode so we can skip over it. */
		skipvp = hfs_chash_getvnode(hfsmp, kHFSRootFolderID, 0, 0, 0);
	}

	/* First pass skips swap files as well as system files and the root. */
	error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags);
	if (error != 0)
		return(error);

	/* Second pass: flush everything except system files (and the root). */
	error = vflush(mp, skipvp, SKIPSYSTEM | flags);

	if (skipvp) {
		/*
		 * See if there are additional references on the
		 * root vp besides the ones obtained from the open
		 * quota files and CoreStorage.
		 */
		if ((error == 0) &&
		    (vnode_isinuse(skipvp, accounted_root_usecounts))) {
			error = EBUSY;  /* root directory is still open */
		}
		/* hfs_chash_getvnode returned the root cnode locked; drop it. */
		hfs_unlock(VTOC(skipvp));
		/* release the iocount from the hfs_chash_getvnode call above. */
		vnode_put(skipvp);
	}
	if (error && (flags & FORCECLOSE) == 0)
		return (error);

#if QUOTA
	/* Close the quota files, releasing their refs on the root vnode. */
	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP)
				continue;
			hfs_quotaoff(p, mp, i);
		}
	}
#endif /* QUOTA */

	/* With the root no longer pinned, make a final pass including it. */
	if (skipvp) {
		error = vflush(mp, NULLVP, SKIPSYSTEM | flags);
	}

	return (error);
}
3503
3504 /*
3505 * Update volume encoding bitmap (HFS Plus only)
3506 *
3507 * Mark a legacy text encoding as in-use (as needed)
3508 * in the volume header of this HFS+ filesystem.
3509 */
3510 __private_extern__
3511 void
3512 hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
3513 {
3514 #define kIndexMacUkrainian 48 /* MacUkrainian encoding is 152 */
3515 #define kIndexMacFarsi 49 /* MacFarsi encoding is 140 */
3516
3517 u_int32_t index;
3518
3519 switch (encoding) {
3520 case kTextEncodingMacUkrainian:
3521 index = kIndexMacUkrainian;
3522 break;
3523 case kTextEncodingMacFarsi:
3524 index = kIndexMacFarsi;
3525 break;
3526 default:
3527 index = encoding;
3528 break;
3529 }
3530
3531 /* Only mark the encoding as in-use if it wasn't already set */
3532 if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
3533 hfs_lock_mount (hfsmp);
3534 hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
3535 MarkVCBDirty(hfsmp);
3536 hfs_unlock_mount(hfsmp);
3537 }
3538 }
3539
3540 /*
3541 * Update volume stats
3542 *
3543 * On journal volumes this will cause a volume header flush
3544 */
3545 int
3546 hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
3547 {
3548 struct timeval tv;
3549
3550 microtime(&tv);
3551
3552 hfs_lock_mount (hfsmp);
3553
3554 MarkVCBDirty(hfsmp);
3555 hfsmp->hfs_mtime = tv.tv_sec;
3556
3557 switch (op) {
3558 case VOL_UPDATE:
3559 break;
3560 case VOL_MKDIR:
3561 if (hfsmp->hfs_dircount != 0xFFFFFFFF)
3562 ++hfsmp->hfs_dircount;
3563 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3564 ++hfsmp->vcbNmRtDirs;
3565 break;
3566 case VOL_RMDIR:
3567 if (hfsmp->hfs_dircount != 0)
3568 --hfsmp->hfs_dircount;
3569 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3570 --hfsmp->vcbNmRtDirs;
3571 break;
3572 case VOL_MKFILE:
3573 if (hfsmp->hfs_filecount != 0xFFFFFFFF)
3574 ++hfsmp->hfs_filecount;
3575 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3576 ++hfsmp->vcbNmFls;
3577 break;
3578 case VOL_RMFILE:
3579 if (hfsmp->hfs_filecount != 0)
3580 --hfsmp->hfs_filecount;
3581 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3582 --hfsmp->vcbNmFls;
3583 break;
3584 }
3585
3586 hfs_unlock_mount (hfsmp);
3587
3588 if (hfsmp->jnl) {
3589 hfs_flushvolumeheader(hfsmp, 0, 0);
3590 }
3591
3592 return (0);
3593 }
3594
3595
3596 #if CONFIG_HFS_STD
3597 /* HFS Standard MDB flush */
static int
hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush)
{
	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
	struct filefork *fp;
	HFSMasterDirectoryBlock *mdb;
	struct buf *bp = NULL;
	int retval;
	int sector_size;
	ByteCount namelen;

	/* Read the sector that holds the primary MDB into the buffer cache. */
	sector_size = hfsmp->hfs_logical_block_size;
	retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sector_size), sector_size, NOCRED, &bp);
	if (retval) {
		if (bp)
			buf_brelse(bp);
		return retval;
	}

	/* Hold the mount lock while copying in-memory VCB state to disk form. */
	hfs_lock_mount (hfsmp);

	mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sector_size));

	/*
	 * On-disk MDB fields are big-endian; dates on HFS Standard are
	 * stored in local time rather than UTC.
	 * NOTE(review): several 16-bit fields (drAllocPtr, drFreeBks) are
	 * assigned from wider in-memory values -- presumably safe because
	 * an HFS Standard volume's block counts fit in 16 bits; confirm.
	 */
	mdb->drCrDate = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->hfs_itime)));
	mdb->drLsMod = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod)));
	mdb->drAtrb = SWAP_BE16 (vcb->vcbAtrb);
	mdb->drNmFls = SWAP_BE16 (vcb->vcbNmFls);
	mdb->drAllocPtr = SWAP_BE16 (vcb->nextAllocation);
	mdb->drClpSiz = SWAP_BE32 (vcb->vcbClpSiz);
	mdb->drNxtCNID = SWAP_BE32 (vcb->vcbNxtCNID);
	mdb->drFreeBks = SWAP_BE16 (vcb->freeBlocks);

	/* Convert the UTF-8 volume name back to the on-disk encoding. */
	namelen = strlen((char *)vcb->vcbVN);
	retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN);
	/* Retry with MacRoman in case that's how it was exported. */
	if (retval)
		retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN);

	mdb->drVolBkUp = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbVolBkUp)));
	mdb->drWrCnt = SWAP_BE32 (vcb->vcbWrCnt);
	mdb->drNmRtDirs = SWAP_BE16 (vcb->vcbNmRtDirs);
	mdb->drFilCnt = SWAP_BE32 (vcb->vcbFilCnt);
	mdb->drDirCnt = SWAP_BE32 (vcb->vcbDirCnt);

	bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo));

	/* Sync the extents overflow file's first three extents and sizes. */
	fp = VTOF(vcb->extentsRefNum);
	mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
	mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
	mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
	mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
	mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
	mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
	mdb->drXTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
	mdb->drXTClpSiz = SWAP_BE32 (fp->ff_clumpsize);
	FTOC(fp)->c_flag &= ~C_MODIFIED;

	/* Sync the catalog file's first three extents and sizes. */
	fp = VTOF(vcb->catalogRefNum);
	mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
	mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
	mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
	mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
	mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
	mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
	mdb->drCTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
	mdb->drCTClpSiz = SWAP_BE32 (fp->ff_clumpsize);
	FTOC(fp)->c_flag &= ~C_MODIFIED;

	/* In-memory state is now fully mirrored to the buffer. */
	MarkVCBClean( vcb );

	hfs_unlock_mount (hfsmp);

	/* If requested, flush out the alternate MDB */
	if (altflush) {
		struct buf *alt_bp = NULL;

		if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_partition_avh_sector, sector_size, NOCRED, &alt_bp) == 0) {
			bcopy(mdb, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sector_size), kMDBSize);

			(void) VNOP_BWRITE(alt_bp);
		} else if (alt_bp)
			buf_brelse(alt_bp);
	}

	/* Asynchronous write unless the caller asked to wait for completion. */
	if (waitfor != MNT_WAIT)
		buf_bawrite(bp);
	else
		retval = VNOP_BWRITE(bp);

	return (retval);
}
3689 #endif
3690
3691 /*
3692 * Flush any dirty in-memory mount data to the on-disk
3693 * volume header.
3694 *
3695 * Note: the on-disk volume signature is intentionally
3696 * not flushed since the on-disk "H+" and "HX" signatures
3697 * are always stored in-memory as "H+".
3698 */
3699 int
3700 hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush)
3701 {
3702 ExtendedVCB *vcb = HFSTOVCB(hfsmp);
3703 struct filefork *fp;
3704 HFSPlusVolumeHeader *volumeHeader, *altVH;
3705 int retval;
3706 struct buf *bp, *alt_bp;
3707 int i;
3708 daddr64_t priIDSector;
3709 int critical;
3710 u_int16_t signature;
3711 u_int16_t hfsversion;
3712 daddr64_t avh_sector;
3713
3714 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
3715 return(0);
3716 }
3717 #if CONFIG_HFS_STD
3718 if (hfsmp->hfs_flags & HFS_STANDARD) {
3719 return hfs_flushMDB(hfsmp, waitfor, altflush);
3720 }
3721 #endif
3722 critical = altflush;
3723 priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3724 HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
3725
3726 if (hfs_start_transaction(hfsmp) != 0) {
3727 return EINVAL;
3728 }
3729
3730 bp = NULL;
3731 alt_bp = NULL;
3732
3733 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3734 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
3735 hfsmp->hfs_physical_block_size, NOCRED, &bp);
3736 if (retval) {
3737 printf("hfs: err %d reading VH blk (vol=%s)\n", retval, vcb->vcbVN);
3738 goto err_exit;
3739 }
3740
3741 volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) +
3742 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3743
3744 /*
3745 * Sanity check what we just read. If it's bad, try the alternate
3746 * instead.
3747 */
3748 signature = SWAP_BE16 (volumeHeader->signature);
3749 hfsversion = SWAP_BE16 (volumeHeader->version);
3750 if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3751 (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
3752 (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) {
3753 printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3754 vcb->vcbVN, signature, hfsversion,
3755 SWAP_BE32 (volumeHeader->blockSize));
3756 hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED);
3757
3758 /* Almost always we read AVH relative to the partition size */
3759 avh_sector = hfsmp->hfs_partition_avh_sector;
3760
3761 if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
3762 /*
3763 * The two altVH offsets do not match --- which means that a smaller file
3764 * system exists in a larger partition. Verify that we have the correct
3765 * alternate volume header sector as per the current parititon size.
3766 * The GPT device that we are mounted on top could have changed sizes
3767 * without us knowing.
3768 *
3769 * We're in a transaction, so it's safe to modify the partition_avh_sector
3770 * field if necessary.
3771 */
3772
3773 uint64_t sector_count;
3774
3775 /* Get underlying device block count */
3776 if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
3777 (caddr_t)&sector_count, 0, vfs_context_current()))) {
3778 printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
3779 retval = ENXIO;
3780 goto err_exit;
3781 }
3782
3783 /* Partition size was changed without our knowledge */
3784 if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
3785 hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3786 HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
3787 /* Note: hfs_fs_avh_sector will remain unchanged */
3788 printf ("hfs_flushVH: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
3789 hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
3790
3791 /*
3792 * We just updated the offset for AVH relative to
3793 * the partition size, so the content of that AVH
3794 * will be invalid. But since we are also maintaining
3795 * a valid AVH relative to the file system size, we
3796 * can read it since primary VH and partition AVH
3797 * are not valid.
3798 */
3799 avh_sector = hfsmp->hfs_fs_avh_sector;
3800 }
3801 }
3802
3803 printf ("hfs: trying alternate (for %s) avh_sector=%qu\n",
3804 (avh_sector == hfsmp->hfs_fs_avh_sector) ? "file system" : "partition", avh_sector);
3805
3806 if (avh_sector) {
3807 retval = buf_meta_bread(hfsmp->hfs_devvp,
3808 HFS_PHYSBLK_ROUNDDOWN(avh_sector, hfsmp->hfs_log_per_phys),
3809 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp);
3810 if (retval) {
3811 printf("hfs: err %d reading alternate VH (%s)\n", retval, vcb->vcbVN);
3812 goto err_exit;
3813 }
3814
3815 altVH = (HFSPlusVolumeHeader *)((char *)buf_dataptr(alt_bp) +
3816 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size));
3817 signature = SWAP_BE16(altVH->signature);
3818 hfsversion = SWAP_BE16(altVH->version);
3819
3820 if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3821 (hfsversion < kHFSPlusVersion) || (kHFSPlusVersion > 100) ||
3822 (SWAP_BE32(altVH->blockSize) != vcb->blockSize)) {
3823 printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3824 vcb->vcbVN, signature, hfsversion,
3825 SWAP_BE32(altVH->blockSize));
3826 retval = EIO;
3827 goto err_exit;
3828 }
3829
3830 /* The alternate is plausible, so use it. */
3831 bcopy(altVH, volumeHeader, kMDBSize);
3832 buf_brelse(alt_bp);
3833 alt_bp = NULL;
3834 } else {
3835 /* No alternate VH, nothing more we can do. */
3836 retval = EIO;
3837 goto err_exit;
3838 }
3839 }
3840
3841 if (hfsmp->jnl) {
3842 journal_modify_block_start(hfsmp->jnl, bp);
3843 }
3844
3845 /*
3846 * For embedded HFS+ volumes, update create date if it changed
3847 * (ie from a setattrlist call)
3848 */
3849 if ((vcb->hfsPlusIOPosOffset != 0) &&
3850 (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) {
3851 struct buf *bp2;
3852 HFSMasterDirectoryBlock *mdb;
3853
3854 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3855 HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys),
3856 hfsmp->hfs_physical_block_size, NOCRED, &bp2);
3857 if (retval) {
3858 if (bp2)
3859 buf_brelse(bp2);
3860 retval = 0;
3861 } else {
3862 mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) +
3863 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3864
3865 if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate )
3866 {
3867 if (hfsmp->jnl) {
3868 journal_modify_block_start(hfsmp->jnl, bp2);
3869 }
3870
3871 mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate); /* pick up the new create date */
3872
3873 if (hfsmp->jnl) {
3874 journal_modify_block_end(hfsmp->jnl, bp2, NULL, NULL);
3875 } else {
3876 (void) VNOP_BWRITE(bp2); /* write out the changes */
3877 }
3878 }
3879 else
3880 {
3881 buf_brelse(bp2); /* just release it */
3882 }
3883 }
3884 }
3885
3886 hfs_lock_mount (hfsmp);
3887
3888 /* Note: only update the lower 16 bits worth of attributes */
3889 volumeHeader->attributes = SWAP_BE32 (vcb->vcbAtrb);
3890 volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock);
3891 if (hfsmp->jnl) {
3892 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion);
3893 } else {
3894 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion);
3895 }
3896 volumeHeader->createDate = SWAP_BE32 (vcb->localCreateDate); /* volume create date is in local time */
3897 volumeHeader->modifyDate = SWAP_BE32 (to_hfs_time(vcb->vcbLsMod));
3898 volumeHeader->backupDate = SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp));
3899 volumeHeader->fileCount = SWAP_BE32 (vcb->vcbFilCnt);
3900 volumeHeader->folderCount = SWAP_BE32 (vcb->vcbDirCnt);
3901 volumeHeader->totalBlocks = SWAP_BE32 (vcb->totalBlocks);
3902 volumeHeader->freeBlocks = SWAP_BE32 (vcb->freeBlocks);
3903 volumeHeader->nextAllocation = SWAP_BE32 (vcb->nextAllocation);
3904 volumeHeader->rsrcClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
3905 volumeHeader->dataClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
3906 volumeHeader->nextCatalogID = SWAP_BE32 (vcb->vcbNxtCNID);
3907 volumeHeader->writeCount = SWAP_BE32 (vcb->vcbWrCnt);
3908 volumeHeader->encodingsBitmap = SWAP_BE64 (vcb->encodingsBitmap);
3909
3910 if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) {
3911 bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo));
3912 critical = 1;
3913 }
3914
3915 /*
3916 * System files are only dirty when altflush is set.
3917 */
3918 if (altflush == 0) {
3919 goto done;
3920 }
3921
3922 /* Sync Extents over-flow file meta data */
3923 fp = VTOF(vcb->extentsRefNum);
3924 if (FTOC(fp)->c_flag & C_MODIFIED) {
3925 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3926 volumeHeader->extentsFile.extents[i].startBlock =
3927 SWAP_BE32 (fp->ff_extents[i].startBlock);
3928 volumeHeader->extentsFile.extents[i].blockCount =
3929 SWAP_BE32 (fp->ff_extents[i].blockCount);
3930 }
3931 volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size);
3932 volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3933 volumeHeader->extentsFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3934 FTOC(fp)->c_flag &= ~C_MODIFIED;
3935 }
3936
3937 /* Sync Catalog file meta data */
3938 fp = VTOF(vcb->catalogRefNum);
3939 if (FTOC(fp)->c_flag & C_MODIFIED) {
3940 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3941 volumeHeader->catalogFile.extents[i].startBlock =
3942 SWAP_BE32 (fp->ff_extents[i].startBlock);
3943 volumeHeader->catalogFile.extents[i].blockCount =
3944 SWAP_BE32 (fp->ff_extents[i].blockCount);
3945 }
3946 volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size);
3947 volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3948 volumeHeader->catalogFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3949 FTOC(fp)->c_flag &= ~C_MODIFIED;
3950 }
3951
3952 /* Sync Allocation file meta data */
3953 fp = VTOF(vcb->allocationsRefNum);
3954 if (FTOC(fp)->c_flag & C_MODIFIED) {
3955 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3956 volumeHeader->allocationFile.extents[i].startBlock =
3957 SWAP_BE32 (fp->ff_extents[i].startBlock);
3958 volumeHeader->allocationFile.extents[i].blockCount =
3959 SWAP_BE32 (fp->ff_extents[i].blockCount);
3960 }
3961 volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size);
3962 volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3963 volumeHeader->allocationFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3964 FTOC(fp)->c_flag &= ~C_MODIFIED;
3965 }
3966
3967 /* Sync Attribute file meta data */
3968 if (hfsmp->hfs_attribute_vp) {
3969 fp = VTOF(hfsmp->hfs_attribute_vp);
3970 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3971 volumeHeader->attributesFile.extents[i].startBlock =
3972 SWAP_BE32 (fp->ff_extents[i].startBlock);
3973 volumeHeader->attributesFile.extents[i].blockCount =
3974 SWAP_BE32 (fp->ff_extents[i].blockCount);
3975 }
3976 FTOC(fp)->c_flag &= ~C_MODIFIED;
3977 volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size);
3978 volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3979 volumeHeader->attributesFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3980 }
3981
3982 /* Sync Startup file meta data */
3983 if (hfsmp->hfs_startup_vp) {
3984 fp = VTOF(hfsmp->hfs_startup_vp);
3985 if (FTOC(fp)->c_flag & C_MODIFIED) {
3986 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3987 volumeHeader->startupFile.extents[i].startBlock =
3988 SWAP_BE32 (fp->ff_extents[i].startBlock);
3989 volumeHeader->startupFile.extents[i].blockCount =
3990 SWAP_BE32 (fp->ff_extents[i].blockCount);
3991 }
3992 volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size);
3993 volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3994 volumeHeader->startupFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3995 FTOC(fp)->c_flag &= ~C_MODIFIED;
3996 }
3997 }
3998
3999 done:
4000 MarkVCBClean(hfsmp);
4001 hfs_unlock_mount (hfsmp);
4002
4003 /* If requested, flush out the alternate volume header */
4004 if (altflush) {
4005 /*
4006 * The two altVH offsets do not match --- which means that a smaller file
4007 * system exists in a larger partition. Verify that we have the correct
		 * alternate volume header sector as per the current partition size.
		 * The GPT device that we are mounted on top could have changed sizes
		 * without us knowing.
4011 *
4012 * We're in a transaction, so it's safe to modify the partition_avh_sector
4013 * field if necessary.
4014 */
4015 if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
4016 uint64_t sector_count;
4017
4018 /* Get underlying device block count */
4019 if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
4020 (caddr_t)&sector_count, 0, vfs_context_current()))) {
4021 printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
4022 retval = ENXIO;
4023 goto err_exit;
4024 }
4025
4026 /* Partition size was changed without our knowledge */
4027 if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
4028 hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
4029 HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
4030 /* Note: hfs_fs_avh_sector will remain unchanged */
4031 printf ("hfs_flushVH: altflush: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
4032 hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
4033 }
4034 }
4035
4036 /*
4037 * First see if we need to write I/O to the "secondary" AVH
4038 * located at FS Size - 1024 bytes, because this one will
4039 * always go into the journal. We put this AVH into the journal
4040 * because even if the filesystem size has shrunk, this LBA should be
4041 * reachable after the partition-size modification has occurred.
4042 * The one where we need to be careful is partitionsize-1024, since the
4043 * partition size should hopefully shrink.
4044 *
4045 * Most of the time this block will not execute.
4046 */
4047 if ((hfsmp->hfs_fs_avh_sector) &&
4048 (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
4049 if (buf_meta_bread(hfsmp->hfs_devvp,
4050 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
4051 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4052 if (hfsmp->jnl) {
4053 journal_modify_block_start(hfsmp->jnl, alt_bp);
4054 }
4055
4056 bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4057 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4058 kMDBSize);
4059
4060 if (hfsmp->jnl) {
4061 journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
4062 } else {
4063 (void) VNOP_BWRITE(alt_bp);
4064 }
4065 } else if (alt_bp) {
4066 buf_brelse(alt_bp);
4067 }
4068 }
4069
4070 /*
4071 * Flush out alternate volume header located at 1024 bytes before
4072 * end of the partition as part of journal transaction. In
4073 * most cases, this will be the only alternate volume header
4074 * that we need to worry about because the file system size is
4075 * same as the partition size, therefore hfs_fs_avh_sector is
4076 * same as hfs_partition_avh_sector. This is the "priority" AVH.
4077 *
4078 * However, do not always put this I/O into the journal. If we skipped the
4079 * FS-Size AVH write above, then we will put this I/O into the journal as
4080 * that indicates the two were in sync. However, if the FS size is
4081 * not the same as the partition size, we are tracking two. We don't
4082 * put it in the journal in that case, since if the partition
4083 * size changes between uptimes, and we need to replay the journal,
4084 * this I/O could generate an EIO if during replay it is now trying
4085 * to access blocks beyond the device EOF.
4086 */
4087 if (hfsmp->hfs_partition_avh_sector) {
4088 if (buf_meta_bread(hfsmp->hfs_devvp,
4089 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
4090 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4091
4092 /* only one AVH, put this I/O in the journal. */
4093 if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4094 journal_modify_block_start(hfsmp->jnl, alt_bp);
4095 }
4096
4097 bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4098 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4099 kMDBSize);
4100
4101 /* If journaled and we only have one AVH to track */
4102 if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4103 journal_modify_block_end (hfsmp->jnl, alt_bp, NULL, NULL);
4104 } else {
4105 /*
4106 * If we don't have a journal or there are two AVH's at the
4107 * moment, then this one doesn't go in the journal. Note that
4108 * this one may generate I/O errors, since the partition
4109 * can be resized behind our backs at any moment and this I/O
4110 * may now appear to be beyond the device EOF.
4111 */
4112 (void) VNOP_BWRITE(alt_bp);
4113 (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE,
4114 NULL, FWRITE, NULL);
4115 }
4116 } else if (alt_bp) {
4117 buf_brelse(alt_bp);
4118 }
4119 }
4120 }
4121
4122 /* Finish modifying the block for the primary VH */
4123 if (hfsmp->jnl) {
4124 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
4125 } else {
4126 if (waitfor != MNT_WAIT) {
4127 buf_bawrite(bp);
4128 } else {
4129 retval = VNOP_BWRITE(bp);
4130 /* When critical data changes, flush the device cache */
4131 if (critical && (retval == 0)) {
4132 (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE,
4133 NULL, FWRITE, NULL);
4134 }
4135 }
4136 }
4137 hfs_end_transaction(hfsmp);
4138
4139 return (retval);
4140
4141 err_exit:
4142 if (alt_bp)
4143 buf_brelse(alt_bp);
4144 if (bp)
4145 buf_brelse(bp);
4146 hfs_end_transaction(hfsmp);
4147 return retval;
4148 }
4149
4150
4151 /*
4152 * Creates a UUID from a unique "name" in the HFS UUID Name space.
4153 * See version 3 UUID.
4154 */
4155 static void
4156 hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result)
4157 {
4158 MD5_CTX md5c;
4159 uint8_t rawUUID[8];
4160
4161 ((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6];
4162 ((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7];
4163
4164 MD5Init( &md5c );
4165 MD5Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) );
4166 MD5Update( &md5c, rawUUID, sizeof (rawUUID) );
4167 MD5Final( result, &md5c );
4168
4169 result[6] = 0x30 | ( result[6] & 0x0F );
4170 result[8] = 0x80 | ( result[8] & 0x3F );
4171 }
4172
4173 /*
4174 * Get file system attributes.
4175 */
static int
hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
{
#define HFS_ATTR_CMN_VALIDMASK ATTR_CMN_VALIDMASK
#define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_ACCTIME))

	ExtendedVCB *vcb = VFSTOVCB(mp);
	struct hfsmount *hfsmp = VFSTOHFS(mp);
	u_int32_t freeCNIDs;

	int searchfs_on = 0;
	int exchangedata_on = 1;

#if CONFIG_SEARCHFS
	searchfs_on = 1;
#endif

#if CONFIG_PROTECT
	/* exchangedata is not supported on content-protected volumes */
	if (cp_fs_protected(mp)) {
		exchangedata_on = 0;
	}
#endif

	/* CNIDs remaining before the 32-bit catalog-node-ID space is exhausted */
	freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)hfsmp->vcbNxtCNID;

	/* Unconditionally-supported counts and sizes, straight from the VCB */
	VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
	VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
	VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
	VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
	VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
	VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
	VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
	/* XXX needs clarification */
	VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
	/* Maximum files is constrained by total blocks. */
	VFSATTR_RETURN(fsap, f_files, (u_int64_t)(hfsmp->totalBlocks - 2));
	VFSATTR_RETURN(fsap, f_ffree, MIN((u_int64_t)freeCNIDs, (u_int64_t)hfs_freeblks(hfsmp, 1)));

	fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
	fsap->f_fsid.val[1] = vfs_typenum(mp);
	VFSATTR_SET_SUPPORTED(fsap, f_fsid);

	VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
	VFSATTR_RETURN(fsap, f_carbon_fsid, 0);

	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
		vol_capabilities_attr_t *cap;

		cap = &fsap->f_capabilities;

		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			/* HFS+ & variants */
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_SYMBOLICLINKS |
				VOL_CAP_FMT_HARDLINKS |
				VOL_CAP_FMT_JOURNAL |
				VOL_CAP_FMT_ZERO_RUNS |
				(hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
				(hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_2TB_FILESIZE |
				VOL_CAP_FMT_HIDDEN_FILES |
#if HFS_COMPRESSION
				VOL_CAP_FMT_PATH_FROM_ID |
				VOL_CAP_FMT_DECMPFS_COMPRESSION;
#else
				VOL_CAP_FMT_PATH_FROM_ID;
#endif
		}
#if CONFIG_HFS_STD
		else {
			/* HFS standard */
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_HIDDEN_FILES |
				VOL_CAP_FMT_PATH_FROM_ID;
		}
#endif

		/*
		 * The capabilities word in 'cap' tell you whether or not
		 * this particular filesystem instance has feature X enabled.
		 */

		cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
			VOL_CAP_INT_FLOCK |
#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS may conditionally support searchfs and exchangedata depending on the runtime */

		if (searchfs_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_SEARCHFS;
		}
		if (exchangedata_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXCHANGEDATA;
		}

		cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;

		cap->valid[VOL_CAPABILITIES_FORMAT] =
			VOL_CAP_FMT_PERSISTENTOBJECTIDS |
			VOL_CAP_FMT_SYMBOLICLINKS |
			VOL_CAP_FMT_HARDLINKS |
			VOL_CAP_FMT_JOURNAL |
			VOL_CAP_FMT_JOURNAL_ACTIVE |
			VOL_CAP_FMT_NO_ROOT_TIMES |
			VOL_CAP_FMT_SPARSE_FILES |
			VOL_CAP_FMT_ZERO_RUNS |
			VOL_CAP_FMT_CASE_SENSITIVE |
			VOL_CAP_FMT_CASE_PRESERVING |
			VOL_CAP_FMT_FAST_STATFS |
			VOL_CAP_FMT_2TB_FILESIZE |
			VOL_CAP_FMT_OPENDENYMODES |
			VOL_CAP_FMT_HIDDEN_FILES |
#if HFS_COMPRESSION
			VOL_CAP_FMT_PATH_FROM_ID |
			VOL_CAP_FMT_DECMPFS_COMPRESSION;
#else
			VOL_CAP_FMT_PATH_FROM_ID;
#endif

		/*
		 * Bits in the "valid" field tell you whether or not the on-disk
		 * format supports feature X.
		 */

		cap->valid[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_COPYFILE |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
			VOL_CAP_INT_FLOCK |
			VOL_CAP_INT_MANLOCK |
#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS always supports exchangedata and searchfs in the on-disk format natively */
		cap->valid[VOL_CAPABILITIES_INTERFACES] |= (VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_EXCHANGEDATA);


		cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
		vol_attributes_attr_t *attrp = &fsap->f_attributes;

		/* 'validattr': which attributes this VFS implementation can return */
		attrp->validattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
		attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->validattr.forkattr = 0;

		/* 'nativeattr': which of those are native to the on-disk format */
		attrp->nativeattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
		attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->nativeattr.forkattr = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
	}
	fsap->f_create_time.tv_sec = hfsmp->hfs_itime;
	fsap->f_create_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_create_time);
	fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
	fsap->f_modify_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_modify_time);

	fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
	fsap->f_backup_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_backup_time);
	if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
		u_int16_t subtype = 0;

		/*
		 * Subtypes (flavors) for HFS
		 *   0:   Mac OS Extended
		 *   1:   Mac OS Extended (Journaled)
		 *   2:   Mac OS Extended (Case Sensitive)
		 *   3:   Mac OS Extended (Case Sensitive, Journaled)
		 *   4 - 127:   Reserved
		 * 128:   Mac OS Standard
		 *
		 */
		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			if (hfsmp->jnl) {
				subtype |= HFS_SUBTYPE_JOURNALED;
			}
			if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
				subtype |= HFS_SUBTYPE_CASESENSITIVE;
			}
		}
#if CONFIG_HFS_STD
		else {
			subtype = HFS_SUBTYPE_STANDARDHFS;
		}
#endif
		fsap->f_fssubtype = subtype;
		VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
	}

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_uuid)) {
		hfs_getvoluuid(hfsmp, fsap->f_uuid);
		VFSATTR_SET_SUPPORTED(fsap, f_uuid);
	}
	return (0);
}
4412
4413 /*
4414 * Perform a volume rename. Requires the FS' root vp.
4415 */
static int
hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
{
	ExtendedVCB *vcb = VTOVCB(vp);
	struct cnode *cp = VTOC(vp);
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct cat_desc to_desc;
	struct cat_desc todir_desc;
	struct cat_desc new_desc;
	cat_cookie_t cookie;
	int lockflags;
	int error = 0;
	char converted_volname[256];
	size_t volname_length = 0;
	size_t conv_volname_length = 0;


	/*
	 * Ignore attempts to rename a volume to a zero-length name.
	 */
	if (name[0] == 0)
		return(0);

	bzero(&to_desc, sizeof(to_desc));
	bzero(&todir_desc, sizeof(todir_desc));
	bzero(&new_desc, sizeof(new_desc));
	bzero(&cookie, sizeof(cookie));

	/* Destination directory: the root folder (volume name == root dir name). */
	todir_desc.cd_parentcnid = kHFSRootParentID;
	todir_desc.cd_cnid = kHFSRootFolderID;
	todir_desc.cd_flags = CD_ISDIR;

	/* Destination descriptor: same CNID, new name, under the root parent. */
	to_desc.cd_nameptr = (const u_int8_t *)name;
	to_desc.cd_namelen = strlen(name);
	to_desc.cd_parentcnid = kHFSRootParentID;
	to_desc.cd_cnid = cp->c_cnid;
	to_desc.cd_flags = CD_ISDIR;

	/*
	 * Ordering matters here: cnode lock, then a journal transaction,
	 * then a catalog preflight reservation, then the catalog B-tree lock.
	 */
	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
		if ((error = hfs_start_transaction(hfsmp)) == 0) {
			if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

				error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);

				/*
				 * If successful, update the name in the VCB, ensure it's terminated.
				 */
				if (error == 0) {
					strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));

					volname_length = strlen ((const char*)vcb->vcbVN);
#define DKIOCCSSETLVNAME _IOW('d', 198, char[256])
					/* Send the volume name down to CoreStorage if necessary */
					error = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t*)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED);
					if (error == 0) {
						(void) VNOP_IOCTL (hfsmp->hfs_devvp, DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current());
					}
					/* The CoreStorage notification is best-effort: swallow any
					 * normalization/ioctl error so it can't fail the rename. */
					error = 0;
				}

				hfs_systemfile_unlock(hfsmp, lockflags);
				cat_postflight(hfsmp, &cookie, p);

				/* NOTE(review): VCB is marked dirty only on *error* here, while
				 * the header is flushed unconditionally — looks inverted, but it
				 * matches the shipped behavior; confirm intent before changing. */
				if (error)
					MarkVCBDirty(vcb);
				(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
			}
			hfs_end_transaction(hfsmp);
		}
		if (!error) {
			/* Release old allocated name buffer */
			if (cp->c_desc.cd_flags & CD_HASBUF) {
				const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;

				cp->c_desc.cd_nameptr = 0;
				cp->c_desc.cd_namelen = 0;
				cp->c_desc.cd_flags &= ~CD_HASBUF;
				vfs_removename(tmp_name);
			}
			/* Update cnode's catalog descriptor */
			replace_desc(cp, &new_desc);
			vcb->volumeNameEncodingHint = new_desc.cd_encoding;
			cp->c_touch_chgtime = TRUE;
		}

		hfs_unlock(cp);
	}

	return(error);
}
4507
4508 /*
4509 * Get file system attributes.
4510 */
4511 static int
4512 hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
4513 {
4514 kauth_cred_t cred = vfs_context_ucred(context);
4515 int error = 0;
4516
4517 /*
4518 * Must be superuser or owner of filesystem to change volume attributes
4519 */
4520 if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
4521 return(EACCES);
4522
4523 if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
4524 vnode_t root_vp;
4525
4526 error = hfs_vfs_root(mp, &root_vp, context);
4527 if (error)
4528 goto out;
4529
4530 error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
4531 (void) vnode_put(root_vp);
4532 if (error)
4533 goto out;
4534
4535 VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
4536 }
4537
4538 out:
4539 return error;
4540 }
4541
4542 /* If a runtime corruption is detected, set the volume inconsistent
4543 * bit in the volume attributes. The volume inconsistent bit is a persistent
4544 * bit which represents that the volume is corrupt and needs repair.
4545 * The volume inconsistent bit can be set from the kernel when it detects
4546 * runtime corruption or from file system repair utilities like fsck_hfs when
4547 * a repair operation fails. The bit should be cleared only from file system
4548 * verify/repair utility like fsck_hfs when a verify/repair succeeds.
4549 */
4550 __private_extern__
4551 void hfs_mark_inconsistent(struct hfsmount *hfsmp,
4552 hfs_inconsistency_reason_t reason)
4553 {
4554 hfs_lock_mount (hfsmp);
4555 if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
4556 hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
4557 MarkVCBDirty(hfsmp);
4558 }
4559 if ((hfsmp->hfs_flags & HFS_READ_ONLY)==0) {
4560 switch (reason) {
4561 case HFS_INCONSISTENCY_DETECTED:
4562 printf("hfs_mark_inconsistent: Runtime corruption detected on %s, fsck will be forced on next mount.\n",
4563 hfsmp->vcbVN);
4564 break;
4565 case HFS_ROLLBACK_FAILED:
4566 printf("hfs_mark_inconsistent: Failed to roll back; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
4567 hfsmp->vcbVN);
4568 break;
4569 case HFS_OP_INCOMPLETE:
4570 printf("hfs_mark_inconsistent: Failed to complete operation; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
4571 hfsmp->vcbVN);
4572 break;
4573 case HFS_FSCK_FORCED:
4574 printf("hfs_mark_inconsistent: fsck requested for `%s'; fsck will be forced on next mount.\n",
4575 hfsmp->vcbVN);
4576 break;
4577 }
4578 }
4579 hfs_unlock_mount (hfsmp);
4580 }
4581
4582 /* Replay the journal on the device node provided. Returns zero if
4583 * journal replay succeeded or no journal was supposed to be replayed.
4584 */
static int hfs_journal_replay(vnode_t devvp, vfs_context_t context)
{
	int retval = 0;
	int error = 0;
	struct mount *mp = NULL;
	struct hfs_mount_args *args = NULL;

	/* Replay allowed only on raw devices */
	if (!vnode_ischr(devvp) && !vnode_isblk(devvp)) {
		retval = EINVAL;
		goto out;
	}

	/* Create dummy mount structures so hfs_mountfs can run in
	 * journal-replay-only mode without a real mounted volume. */
	MALLOC(mp, struct mount *, sizeof(struct mount), M_TEMP, M_WAITOK);
	if (mp == NULL) {
		retval = ENOMEM;
		goto out;
	}
	bzero(mp, sizeof(struct mount));
	mount_lock_init(mp);

	MALLOC(args, struct hfs_mount_args *, sizeof(struct hfs_mount_args), M_TEMP, M_WAITOK);
	if (args == NULL) {
		retval = ENOMEM;
		goto out;
	}
	bzero(args, sizeof(struct hfs_mount_args));

	/* The final '1' argument requests journal replay only. */
	retval = hfs_mountfs(devvp, mp, args, 1, context);
	buf_flushdirtyblks(devvp, TRUE, 0, "hfs_journal_replay");

	/* FSYNC the devnode to be sure all data has been flushed */
	error = VNOP_FSYNC(devvp, MNT_WAIT, context);
	if (error) {
		/* NOTE(review): a fsync failure overwrites any hfs_mountfs error
		 * in retval; presumably intentional (flush failure dominates). */
		retval = error;
	}

out:
	/* Tear down the dummy structures regardless of outcome. */
	if (mp) {
		mount_lock_destroy(mp);
		FREE(mp, M_TEMP);
	}
	if (args) {
		FREE(args, M_TEMP);
	}
	return retval;
}
4633
4634
4635 /*
4636 * Cancel the syncer
4637 */
static void
hfs_syncer_free(struct hfsmount *hfsmp)
{
	if (hfsmp && hfsmp->hfs_syncer) {
		hfs_syncer_lock(hfsmp);

		/*
		 * First, make sure everything else knows we don't want any more
		 * requests queued.
		 */
		thread_call_t syncer = hfsmp->hfs_syncer;
		hfsmp->hfs_syncer = NULL;

		hfs_syncer_unlock(hfsmp);

		// Now deal with requests that are outstanding
		// (hfs_sync_incomplete is read here after dropping the lock;
		// presumably safe because no new requests can be queued once
		// hfs_syncer is NULL — confirm against the enqueue path)
		if (hfsmp->hfs_sync_incomplete) {
			if (thread_call_cancel(syncer)) {
				// We managed to cancel the timer so we're done
				hfsmp->hfs_sync_incomplete = FALSE;
			} else {
				// Syncer must be running right now so we have to wait
				hfs_syncer_lock(hfsmp);
				while (hfsmp->hfs_sync_incomplete)
					hfs_syncer_wait(hfsmp);
				hfs_syncer_unlock(hfsmp);
			}
		}

		// Now we're safe to free the syncer
		thread_call_free(syncer);
	}
}
4671
4672 /*
4673 * hfs vfs operations.
4674 */
4675 struct vfsops hfs_vfsops = {
4676 hfs_mount,
4677 hfs_start,
4678 hfs_unmount,
4679 hfs_vfs_root,
4680 hfs_quotactl,
4681 hfs_vfs_getattr, /* was hfs_statfs */
4682 hfs_sync,
4683 hfs_vfs_vget,
4684 hfs_fhtovp,
4685 hfs_vptofh,
4686 hfs_init,
4687 hfs_sysctl,
4688 hfs_vfs_setattr,
4689 {NULL}
4690 };