/*
 * Source: apple/hfs.git (hfs-556.100.11) — core/hfs_vfsops.c
 * Retrieved via the git.saurik.com mirror of Apple's HFS sources.
 */
1 /*
2 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1991, 1993, 1994
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * hfs_vfsops.c
66 * derived from @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
67 *
68 * (c) Copyright 1997-2002 Apple Inc. All rights reserved.
69 *
70 * hfs_vfsops.c -- VFS layer for loadable HFS file system.
71 *
72 */
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kauth.h>
76
77 #include <sys/ubc.h>
78 #include <sys/sysctl.h>
79 #include <sys/malloc.h>
80 #include <sys/stat.h>
81 #include <sys/quota.h>
82 #include <sys/disk.h>
83 #include <sys/paths.h>
84 #include <sys/utfconv.h>
85 #include <sys/kdebug.h>
86 #include <sys/fslog.h>
87 #include <sys/ubc.h>
88 #include <libkern/OSKextLib.h>
89 #include <libkern/OSAtomic.h>
90
91 /* for parsing boot-args */
92 #include <pexpert/pexpert.h>
93
94
95 #include <kern/locks.h>
96
97 #include "hfs_journal.h"
98
99 #include <miscfs/specfs/specdev.h>
100 #include "hfs_mount.h"
101
102 #include <libkern/crypto/md5.h>
103 #include <uuid/uuid.h>
104
105 #include "hfs_iokit.h"
106 #include "hfs.h"
107 #include "hfs_catalog.h"
108 #include "hfs_cnode.h"
109 #include "hfs_dbg.h"
110 #include "hfs_endian.h"
111 #include "hfs_hotfiles.h"
112 #include "hfs_quota.h"
113 #include "hfs_btreeio.h"
114 #include "hfs_kdebug.h"
115 #include "hfs_cprotect.h"
116
117 #include "FileMgrInternal.h"
118 #include "BTreesInternal.h"
119
120 #define HFS_MOUNT_DEBUG 1
121
122 /* Enable/disable debugging code for live volume resizing, defined in hfs_resize.c */
123 extern int hfs_resize_debug;
124
125 lck_grp_attr_t * hfs_group_attr;
126 lck_attr_t * hfs_lock_attr;
127 lck_grp_t * hfs_mutex_group;
128 lck_grp_t * hfs_rwlock_group;
129 lck_grp_t * hfs_spinlock_group;
130
131 // variables to manage HFS kext retain count -- only supported on Macs
132 #if TARGET_OS_OSX
133 int hfs_active_mounts = 0;
134 #endif
135
136 extern struct vnodeopv_desc hfs_vnodeop_opv_desc;
137
138 #if CONFIG_HFS_STD
139 extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc;
140 static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);
141 #endif
142
143 /* not static so we can re-use in hfs_readwrite.c for vn_getpath_ext calls */
144 int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);
145
146 static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
147 static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
148 static int hfs_flushfiles(struct mount *, int, struct proc *);
149 static int hfs_init(struct vfsconf *vfsp);
150 static void hfs_locks_destroy(struct hfsmount *hfsmp);
151 static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
152 static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
153 static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
154 static void hfs_syncer_free(struct hfsmount *hfsmp);
155
156 void hfs_initialize_allocator (struct hfsmount *hfsmp);
157 int hfs_teardown_allocator (struct hfsmount *hfsmp);
158
159 int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context);
160 int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context);
161 int hfs_reload(struct mount *mp);
162 int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
163 int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
164 int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
165 user_addr_t newp, size_t newlen, vfs_context_t context);
166 int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);
167
168 static int hfs_journal_replay(vnode_t devvp, vfs_context_t context);
169
170 #if HFS_LEAK_DEBUG
171 #include <IOKit/IOLib.h>
172 #endif
173
174 /*
175 * VFS Operations.
176 *
177 * mount system call
178 */
179
180 int
181 hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
182 {
183
184 #if HFS_LEAK_DEBUG
185
186 #warning HFS_LEAK_DEBUG is on
187
188 hfs_alloc_trace_enable();
189
190 #endif
191
192 struct proc *p = vfs_context_proc(context);
193 struct hfsmount *hfsmp = NULL;
194 struct hfs_mount_args args;
195 int retval = E_NONE;
196 u_int32_t cmdflags;
197
198 if (data && (retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
199 if (HFS_MOUNT_DEBUG) {
200 printf("hfs_mount: copyin returned %d for fs\n", retval);
201 }
202 return (retval);
203 }
204 cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
205 if (cmdflags & MNT_UPDATE) {
206 hfs_assert(data);
207
208 hfsmp = VFSTOHFS(mp);
209
210 /* Reload incore data after an fsck. */
211 if (cmdflags & MNT_RELOAD) {
212 if (vfs_isrdonly(mp)) {
213 int error = hfs_reload(mp);
214 if (error && HFS_MOUNT_DEBUG) {
215 printf("hfs_mount: hfs_reload returned %d on %s \n", error, hfsmp->vcbVN);
216 }
217 return error;
218 }
219 else {
220 if (HFS_MOUNT_DEBUG) {
221 printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp->vcbVN);
222 }
223 return (EINVAL);
224 }
225 }
226
227 /* Change to a read-only file system. */
228 if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
229 vfs_isrdonly(mp)) {
230 int flags;
231
232 /* Set flag to indicate that a downgrade to read-only
233 * is in progress and therefore block any further
234 * modifications to the file system.
235 */
236 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
237 hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
238 hfsmp->hfs_downgrading_thread = current_thread();
239 hfs_unlock_global (hfsmp);
240 hfs_syncer_free(hfsmp);
241
242 /* use hfs_sync to push out System (btree) files */
243 retval = hfs_sync(mp, MNT_WAIT, context);
244 if (retval && ((cmdflags & MNT_FORCE) == 0)) {
245 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
246 hfsmp->hfs_downgrading_thread = NULL;
247 if (HFS_MOUNT_DEBUG) {
248 printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval, hfsmp->vcbVN);
249 }
250 goto out;
251 }
252
253 flags = WRITECLOSE;
254 if (cmdflags & MNT_FORCE)
255 flags |= FORCECLOSE;
256
257 if ((retval = hfs_flushfiles(mp, flags, p))) {
258 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
259 hfsmp->hfs_downgrading_thread = NULL;
260 if (HFS_MOUNT_DEBUG) {
261 printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval, hfsmp->vcbVN);
262 }
263 goto out;
264 }
265
266 /* mark the volume cleanly unmounted */
267 hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
268 retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
269 hfsmp->hfs_flags |= HFS_READ_ONLY;
270
271 /*
272 * Close down the journal.
273 *
274 * NOTE: It is critically important to close down the journal
275 * and have it issue all pending I/O prior to calling VNOP_FSYNC below.
276 * In a journaled environment it is expected that the journal be
277 * the only actor permitted to issue I/O for metadata blocks in HFS.
278 * If we were to call VNOP_FSYNC prior to closing down the journal,
279 * we would inadvertantly issue (and wait for) the I/O we just
280 * initiated above as part of the flushvolumeheader call.
281 *
282 * To avoid this, we follow the same order of operations as in
283 * unmount and issue the journal_close prior to calling VNOP_FSYNC.
284 */
285
286 if (hfsmp->jnl) {
287 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
288
289 journal_close(hfsmp->jnl);
290 hfsmp->jnl = NULL;
291
292 // Note: we explicitly don't want to shutdown
293 // access to the jvp because we may need
294 // it later if we go back to being read-write.
295
296 hfs_unlock_global (hfsmp);
297
298 vfs_clearflags(hfsmp->hfs_mp, MNT_JOURNALED);
299 }
300
301 /*
302 * Write out any pending I/O still outstanding against the device node
303 * now that the journal has been closed.
304 */
305 if (retval == 0) {
306 vnode_get(hfsmp->hfs_devvp);
307 retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
308 vnode_put(hfsmp->hfs_devvp);
309 }
310
311 if (retval) {
312 if (HFS_MOUNT_DEBUG) {
313 printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval, hfsmp->vcbVN);
314 }
315 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
316 hfsmp->hfs_downgrading_thread = NULL;
317 hfsmp->hfs_flags &= ~HFS_READ_ONLY;
318 goto out;
319 }
320
321 if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
322 if (hfsmp->hfs_summary_table) {
323 int err = 0;
324 /*
325 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
326 */
327 if (hfsmp->hfs_allocation_vp) {
328 err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
329 }
330 hfs_free(hfsmp->hfs_summary_table, hfsmp->hfs_summary_bytes);
331 hfsmp->hfs_summary_table = NULL;
332 hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;
333 if (err == 0 && hfsmp->hfs_allocation_vp){
334 hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
335 }
336 }
337 }
338
339 hfsmp->hfs_downgrading_thread = NULL;
340 }
341
342 /* Change to a writable file system. */
343 if (vfs_iswriteupgrade(mp)) {
344 /*
345 * On inconsistent disks, do not allow read-write mount
346 * unless it is the boot volume being mounted.
347 */
348 if (!(vfs_flags(mp) & MNT_ROOTFS) &&
349 (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
350 if (HFS_MOUNT_DEBUG) {
351 printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n", (hfsmp->vcbVN));
352 }
353 retval = EINVAL;
354 goto out;
355 }
356
357 // If the journal was shut-down previously because we were
358 // asked to be read-only, let's start it back up again now
359
360 if ( (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
361 && hfsmp->jnl == NULL
362 && hfsmp->jvp != NULL) {
363 int jflags;
364
365 if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
366 jflags = JOURNAL_RESET;
367 } else {
368 jflags = 0;
369 }
370
371 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
372
373 /* We provide the mount point twice here: The first is used as
374 * an opaque argument to be passed back when hfs_sync_metadata
375 * is called. The second is provided to the throttling code to
376 * indicate which mount's device should be used when accounting
377 * for metadata writes.
378 */
379 hfsmp->jnl = journal_open(hfsmp->jvp,
380 hfs_blk_to_bytes(hfsmp->jnl_start, HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
381 hfsmp->jnl_size,
382 hfsmp->hfs_devvp,
383 hfsmp->hfs_logical_block_size,
384 jflags,
385 0,
386 hfs_sync_metadata, hfsmp->hfs_mp,
387 hfsmp->hfs_mp);
388
389 /*
390 * Set up the trim callback function so that we can add
391 * recently freed extents to the free extent cache once
392 * the transaction that freed them is written to the
393 * journal on disk.
394 */
395 if (hfsmp->jnl)
396 journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);
397
398 hfs_unlock_global (hfsmp);
399
400 if (hfsmp->jnl == NULL) {
401 if (HFS_MOUNT_DEBUG) {
402 printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp->vcbVN));
403 }
404 retval = EINVAL;
405 goto out;
406 } else {
407 hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
408 vfs_setflags(hfsmp->hfs_mp, MNT_JOURNALED);
409 }
410 }
411
412 /* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
413 retval = hfs_erase_unused_nodes(hfsmp);
414 if (retval != E_NONE) {
415 if (HFS_MOUNT_DEBUG) {
416 printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval, hfsmp->vcbVN);
417 }
418 goto out;
419 }
420
421 /* If this mount point was downgraded from read-write
422 * to read-only, clear that information as we are now
423 * moving back to read-write.
424 */
425 hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
426 hfsmp->hfs_downgrading_thread = NULL;
427
428 /* mark the volume dirty (clear clean unmount bit) */
429 hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;
430
431 retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
432 if (retval != E_NONE) {
433 if (HFS_MOUNT_DEBUG) {
434 printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval, hfsmp->vcbVN);
435 }
436 goto out;
437 }
438
439 /* Only clear HFS_READ_ONLY after a successful write */
440 hfsmp->hfs_flags &= ~HFS_READ_ONLY;
441
442
443 if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
444 /* Setup private/hidden directories for hardlinks. */
445 hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
446 hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
447
448 hfs_remove_orphans(hfsmp);
449
450 /*
451 * Since we're upgrading to a read-write mount, allow
452 * hot file clustering if conditions allow.
453 *
454 * Note: this normally only would happen if you booted
455 * single-user and upgraded the mount to read-write
456 *
457 * Note: at this point we are not allowed to fail the
458 * mount operation because the HotFile init code
459 * in hfs_recording_init() will lookup vnodes with
460 * VNOP_LOOKUP() which hangs vnodes off the mount
461 * (and if we were to fail, VFS is not prepared to
462 * clean that up at this point. Since HotFiles are
463 * optional, this is not a big deal.
464 */
465 if (ISSET(hfsmp->hfs_flags, HFS_METADATA_ZONE)
466 && (!ISSET(hfsmp->hfs_flags, HFS_SSD)
467 || ISSET(hfsmp->hfs_flags, HFS_CS_HOTFILE_PIN))) {
468 hfs_recording_init(hfsmp);
469 }
470 /* Force ACLs on HFS+ file systems. */
471 if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
472 vfs_setextendedsecurity(HFSTOVFS(hfsmp));
473 }
474 }
475 }
476
477 /* Update file system parameters. */
478 retval = hfs_changefs(mp, &args);
479 if (retval && HFS_MOUNT_DEBUG) {
480 printf("hfs_mount: hfs_changefs returned %d for %s\n", retval, hfsmp->vcbVN);
481 }
482
483 } else /* not an update request */ {
484 if (devvp == NULL) {
485 retval = EINVAL;
486 goto out;
487 }
488 /* Set the mount flag to indicate that we support volfs */
489 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));
490
491 retval = hfs_mountfs(devvp, mp, data ? &args : NULL, 0, context);
492 if (retval) {
493 const char *name = vnode_getname(devvp);
494 printf("hfs_mount: hfs_mountfs returned error=%d for device %s\n", retval, (name ? name : "unknown-dev"));
495 if (name) {
496 vnode_putname(name);
497 }
498 goto out;
499 }
500
501 /* After hfs_mountfs succeeds, we should have valid hfsmp */
502 hfsmp = VFSTOHFS(mp);
503
504 /* Set up the maximum defrag file size */
505 hfsmp->hfs_defrag_max = HFS_INITIAL_DEFRAG_SIZE;
506
507
508 if (!data) {
509 // Root mount
510
511 hfsmp->hfs_uid = UNKNOWNUID;
512 hfsmp->hfs_gid = UNKNOWNGID;
513 hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
514 hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
515
516 /* Establish the free block reserve. */
517 hfsmp->reserveBlocks = ((u_int64_t)hfsmp->totalBlocks * HFS_MINFREE) / 100;
518 hfsmp->reserveBlocks = MIN(hfsmp->reserveBlocks, HFS_MAXRESERVE / hfsmp->blockSize);
519 }
520 #if TARGET_OS_OSX
521 // increment kext retain count
522 OSIncrementAtomic(&hfs_active_mounts);
523 OSKextRetainKextWithLoadTag(OSKextGetCurrentLoadTag());
524 if (hfs_active_mounts <= 0 && panic_on_assert)
525 panic("hfs_mount: error - kext resource count is non-positive: %d but at least one active mount\n", hfs_active_mounts);
526 #endif
527 }
528
529 out:
530 if (retval == 0) {
531 (void)hfs_statfs(mp, vfs_statfs(mp), context);
532 }
533 return (retval);
534 }
535
536
537 struct hfs_changefs_cargs {
538 struct hfsmount *hfsmp;
539 int namefix;
540 int permfix;
541 int permswitch;
542 };
543
544 static int
545 hfs_changefs_callback(struct vnode *vp, void *cargs)
546 {
547 ExtendedVCB *vcb;
548 struct cnode *cp;
549 struct cat_desc cndesc;
550 struct cat_attr cnattr;
551 struct hfs_changefs_cargs *args;
552 int lockflags;
553 int error;
554
555 args = (struct hfs_changefs_cargs *)cargs;
556
557 cp = VTOC(vp);
558 vcb = HFSTOVCB(args->hfsmp);
559
560 lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
561 error = cat_lookup(args->hfsmp, &cp->c_desc, 0, 0, &cndesc, &cnattr, NULL, NULL);
562 hfs_systemfile_unlock(args->hfsmp, lockflags);
563 if (error) {
564 /*
565 * If we couldn't find this guy skip to the next one
566 */
567 if (args->namefix)
568 cache_purge(vp);
569
570 return (VNODE_RETURNED);
571 }
572 /*
573 * Get the real uid/gid and perm mask from disk.
574 */
575 if (args->permswitch || args->permfix) {
576 cp->c_uid = cnattr.ca_uid;
577 cp->c_gid = cnattr.ca_gid;
578 cp->c_mode = cnattr.ca_mode;
579 }
580 /*
581 * If we're switching name converters then...
582 * Remove the existing entry from the namei cache.
583 * Update name to one based on new encoder.
584 */
585 if (args->namefix) {
586 cache_purge(vp);
587 replace_desc(cp, &cndesc);
588
589 if (cndesc.cd_cnid == kHFSRootFolderID) {
590 strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
591 cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;
592 }
593 } else {
594 cat_releasedesc(&cndesc);
595 }
596 return (VNODE_RETURNED);
597 }
598
599 /* Change fs mount parameters */
600 static int
601 hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
602 {
603 int retval = 0;
604 int namefix, permfix, permswitch;
605 struct hfsmount *hfsmp;
606 ExtendedVCB *vcb;
607 struct hfs_changefs_cargs cargs;
608 u_int32_t mount_flags;
609
610 #if CONFIG_HFS_STD
611 u_int32_t old_encoding = 0;
612 hfs_to_unicode_func_t get_unicode_func;
613 unicode_to_hfs_func_t get_hfsname_func = NULL;
614 #endif
615
616 hfsmp = VFSTOHFS(mp);
617 vcb = HFSTOVCB(hfsmp);
618 mount_flags = (unsigned int)vfs_flags(mp);
619
620 hfsmp->hfs_flags |= HFS_IN_CHANGEFS;
621
622 permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
623 ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
624 (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
625 (mount_flags & MNT_UNKNOWNPERMISSIONS)));
626
627 /* The root filesystem must operate with actual permissions: */
628 if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
629 vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS)); /* Just say "No". */
630 retval = EINVAL;
631 goto exit;
632 }
633 if (mount_flags & MNT_UNKNOWNPERMISSIONS)
634 hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
635 else
636 hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;
637
638 namefix = permfix = 0;
639
640 /*
641 * Tracking of hot files requires up-to-date access times. So if
642 * access time updates are disabled, we must also disable hot files.
643 */
644 if (mount_flags & MNT_NOATIME) {
645 (void) hfs_recording_suspend(hfsmp);
646 }
647
648 /* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
649 if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
650 gTimeZone = args->hfs_timezone;
651 }
652
653 /* Change the default uid, gid and/or mask */
654 if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
655 hfsmp->hfs_uid = args->hfs_uid;
656 if (vcb->vcbSigWord == kHFSPlusSigWord)
657 ++permfix;
658 }
659 if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
660 hfsmp->hfs_gid = args->hfs_gid;
661 if (vcb->vcbSigWord == kHFSPlusSigWord)
662 ++permfix;
663 }
664 if (args->hfs_mask != (mode_t)VNOVAL) {
665 if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
666 hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
667 hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
668 if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
669 hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
670 if (vcb->vcbSigWord == kHFSPlusSigWord)
671 ++permfix;
672 }
673 }
674
675 #if CONFIG_HFS_STD
676 /* Change the hfs encoding value (hfs only) */
677 if ((vcb->vcbSigWord == kHFSSigWord) &&
678 (args->hfs_encoding != (u_int32_t)VNOVAL) &&
679 (hfsmp->hfs_encoding != args->hfs_encoding)) {
680
681 retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);
682 if (retval)
683 goto exit;
684
685 /*
686 * Connect the new hfs_get_unicode converter but leave
687 * the old hfs_get_hfsname converter in place so that
688 * we can lookup existing vnodes to get their correctly
689 * encoded names.
690 *
691 * When we're all finished, we can then connect the new
692 * hfs_get_hfsname converter and release our interest
693 * in the old converters.
694 */
695 hfsmp->hfs_get_unicode = get_unicode_func;
696 old_encoding = hfsmp->hfs_encoding;
697 hfsmp->hfs_encoding = args->hfs_encoding;
698 ++namefix;
699 }
700 #endif
701
702 if (!(namefix || permfix || permswitch))
703 goto exit;
704
705 /* XXX 3762912 hack to support HFS filesystem 'owner' */
706 if (permfix) {
707 vfs_setowner(mp,
708 hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
709 hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);
710 }
711
712 /*
713 * For each active vnode fix things that changed
714 *
715 * Note that we can visit a vnode more than once
716 * and we can race with fsync.
717 *
718 * hfs_changefs_callback will be called for each vnode
719 * hung off of this mount point
720 *
721 * The vnode will be properly referenced and unreferenced
722 * around the callback
723 */
724 cargs.hfsmp = hfsmp;
725 cargs.namefix = namefix;
726 cargs.permfix = permfix;
727 cargs.permswitch = permswitch;
728
729 vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);
730
731 #if CONFIG_HFS_STD
732 /*
733 * If we're switching name converters we can now
734 * connect the new hfs_get_hfsname converter and
735 * release our interest in the old converters.
736 */
737 if (namefix) {
738 /* HFS standard only */
739 hfsmp->hfs_get_hfsname = get_hfsname_func;
740 vcb->volumeNameEncodingHint = args->hfs_encoding;
741 (void) hfs_relconverter(old_encoding);
742 }
743 #endif
744
745 exit:
746 hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
747 return (retval);
748 }
749
750
751 struct hfs_reload_cargs {
752 struct hfsmount *hfsmp;
753 int error;
754 };
755
756 static int
757 hfs_reload_callback(struct vnode *vp, void *cargs)
758 {
759 struct cnode *cp;
760 struct hfs_reload_cargs *args;
761 int lockflags;
762
763 args = (struct hfs_reload_cargs *)cargs;
764 /*
765 * flush all the buffers associated with this node
766 */
767 (void) buf_invalidateblks(vp, 0, 0, 0);
768
769 cp = VTOC(vp);
770 /*
771 * Remove any directory hints
772 */
773 if (vnode_isdir(vp))
774 hfs_reldirhints(cp, 0);
775
776 /*
777 * Re-read cnode data for all active vnodes (non-metadata files).
778 */
779 if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp) && (cp->c_fileid >= kHFSFirstUserCatalogNodeID)) {
780 struct cat_fork *datafork;
781 struct cat_desc desc;
782
783 datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;
784
785 /* lookup by fileID since name could have changed */
786 lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
787 args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, 0, &desc, &cp->c_attr, datafork);
788 hfs_systemfile_unlock(args->hfsmp, lockflags);
789 if (args->error) {
790 return (VNODE_RETURNED_DONE);
791 }
792
793 /* update cnode's catalog descriptor */
794 (void) replace_desc(cp, &desc);
795 }
796 return (VNODE_RETURNED);
797 }
798
799 /*
800 * Reload all incore data for a filesystem (used after running fsck on
801 * the root filesystem and finding things to fix). The filesystem must
802 * be mounted read-only.
803 *
804 * Things to do to update the mount:
805 * invalidate all cached meta-data.
806 * invalidate all inactive vnodes.
807 * invalidate all cached file data.
808 * re-read volume header from disk.
809 * re-load meta-file info (extents, file size).
810 * re-load B-tree header data.
811 * re-read cnode data for all active vnodes.
812 */
813 int
814 hfs_reload(struct mount *mountp)
815 {
816 register struct vnode *devvp;
817 struct buf *bp;
818 int error, i;
819 struct hfsmount *hfsmp;
820 struct HFSPlusVolumeHeader *vhp;
821 ExtendedVCB *vcb;
822 struct filefork *forkp;
823 struct cat_desc cndesc;
824 struct hfs_reload_cargs args;
825 daddr64_t priIDSector;
826
827 hfsmp = VFSTOHFS(mountp);
828 vcb = HFSTOVCB(hfsmp);
829
830 if (vcb->vcbSigWord == kHFSSigWord)
831 return (EINVAL); /* rooting from HFS is not supported! */
832
833 /*
834 * Invalidate all cached meta-data.
835 */
836 devvp = hfsmp->hfs_devvp;
837 if (buf_invalidateblks(devvp, 0, 0, 0))
838 panic("hfs_reload: dirty1");
839
840 args.hfsmp = hfsmp;
841 args.error = 0;
842 /*
843 * hfs_reload_callback will be called for each vnode
844 * hung off of this mount point that can't be recycled...
845 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
846 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
847 * properly referenced and unreferenced around the callback
848 */
849 vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);
850
851 if (args.error)
852 return (args.error);
853
854 /*
855 * Re-read VolumeHeader from disk.
856 */
857 priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
858 HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
859
860 error = (int)buf_meta_bread(hfsmp->hfs_devvp,
861 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
862 hfsmp->hfs_physical_block_size, NOCRED, &bp);
863 if (error) {
864 if (bp != NULL)
865 buf_brelse(bp);
866 return (error);
867 }
868
869 vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
870
871 /* Do a quick sanity check */
872 if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
873 SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
874 (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
875 SWAP_BE16(vhp->version) != kHFSXVersion) ||
876 SWAP_BE32(vhp->blockSize) != vcb->blockSize) {
877 buf_brelse(bp);
878 return (EIO);
879 }
880
881 vcb->vcbLsMod = to_bsd_time(SWAP_BE32(vhp->modifyDate));
882 vcb->vcbAtrb = SWAP_BE32 (vhp->attributes);
883 vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock);
884 vcb->vcbClpSiz = SWAP_BE32 (vhp->rsrcClumpSize);
885 vcb->vcbNxtCNID = SWAP_BE32 (vhp->nextCatalogID);
886 vcb->vcbVolBkUp = to_bsd_time(SWAP_BE32(vhp->backupDate));
887 vcb->vcbWrCnt = SWAP_BE32 (vhp->writeCount);
888 vcb->vcbFilCnt = SWAP_BE32 (vhp->fileCount);
889 vcb->vcbDirCnt = SWAP_BE32 (vhp->folderCount);
890 HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
891 vcb->totalBlocks = SWAP_BE32 (vhp->totalBlocks);
892 vcb->freeBlocks = SWAP_BE32 (vhp->freeBlocks);
893 vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap);
894 bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
895 vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */
896
897 /*
898 * Re-load meta-file vnode data (extent info, file size, etc).
899 */
900 forkp = VTOF((struct vnode *)vcb->extentsRefNum);
901 for (i = 0; i < kHFSPlusExtentDensity; i++) {
902 forkp->ff_extents[i].startBlock =
903 SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
904 forkp->ff_extents[i].blockCount =
905 SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
906 }
907 forkp->ff_size = SWAP_BE64 (vhp->extentsFile.logicalSize);
908 forkp->ff_blocks = SWAP_BE32 (vhp->extentsFile.totalBlocks);
909 forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);
910
911
912 forkp = VTOF((struct vnode *)vcb->catalogRefNum);
913 for (i = 0; i < kHFSPlusExtentDensity; i++) {
914 forkp->ff_extents[i].startBlock =
915 SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
916 forkp->ff_extents[i].blockCount =
917 SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
918 }
919 forkp->ff_size = SWAP_BE64 (vhp->catalogFile.logicalSize);
920 forkp->ff_blocks = SWAP_BE32 (vhp->catalogFile.totalBlocks);
921 forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);
922
923 if (hfsmp->hfs_attribute_vp) {
924 forkp = VTOF(hfsmp->hfs_attribute_vp);
925 for (i = 0; i < kHFSPlusExtentDensity; i++) {
926 forkp->ff_extents[i].startBlock =
927 SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
928 forkp->ff_extents[i].blockCount =
929 SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
930 }
931 forkp->ff_size = SWAP_BE64 (vhp->attributesFile.logicalSize);
932 forkp->ff_blocks = SWAP_BE32 (vhp->attributesFile.totalBlocks);
933 forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);
934 }
935
936 forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
937 for (i = 0; i < kHFSPlusExtentDensity; i++) {
938 forkp->ff_extents[i].startBlock =
939 SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
940 forkp->ff_extents[i].blockCount =
941 SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
942 }
943 forkp->ff_size = SWAP_BE64 (vhp->allocationFile.logicalSize);
944 forkp->ff_blocks = SWAP_BE32 (vhp->allocationFile.totalBlocks);
945 forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);
946
947 buf_brelse(bp);
948 vhp = NULL;
949
950 /*
951 * Re-load B-tree header data
952 */
953 forkp = VTOF((struct vnode *)vcb->extentsRefNum);
954 if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
955 return (error);
956
957 forkp = VTOF((struct vnode *)vcb->catalogRefNum);
958 if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
959 return (error);
960
961 if (hfsmp->hfs_attribute_vp) {
962 forkp = VTOF(hfsmp->hfs_attribute_vp);
963 if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
964 return (error);
965 }
966
967 /* Reload the volume name */
968 if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, NULL, NULL)))
969 return (error);
970 vcb->volumeNameEncodingHint = cndesc.cd_encoding;
971 bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
972 cat_releasedesc(&cndesc);
973
974 /* Re-establish private/hidden directories. */
975 hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
976 hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
977
978 /* In case any volume information changed to trigger a notification */
979 hfs_generate_volume_notifications(hfsmp);
980
981 return (0);
982 }
983
984 __unused
985 static uint64_t tv_to_usecs(struct timeval *tv)
986 {
987 return tv->tv_sec * 1000000ULL + tv->tv_usec;
988 }
989
// Returns TRUE if at least `usecs` microseconds separate timestamps
// a and b, i.e. b - a >= usecs (a is expected to be the earlier one).
static bool hfs_has_elapsed (const struct timeval *a,
                             const struct timeval *b,
                             uint64_t usecs)
{
	/* Subtract with a microsecond borrow (same arithmetic as timersub). */
	int64_t delta_secs  = (int64_t)b->tv_sec  - (int64_t)a->tv_sec;
	int64_t delta_usecs = (int64_t)b->tv_usec - (int64_t)a->tv_usec;
	if (delta_usecs < 0) {
		delta_secs  -= 1;
		delta_usecs += 1000000;
	}
	return delta_secs * 1000000ULL + delta_usecs >= usecs;
}
999
/*
 * hfs_syncer - metadata syncer loop for one volume.
 *
 * NOTE(review): the (void *arg, wait_result_t wr) signature suggests this
 * is a kernel-thread entry point spawned at mount time -- confirm at the
 * creation site.  `arg` is the volume's struct hfsmount.
 *
 * Loops while HFS_RUN_SYNCER is set and there is at least one pending
 * sync request (hfs_sync_req_oldest is non-zero).  Each pass waits for
 * HFS_META_DELAY_TS and then flushes if either the oldest pending
 * request is older than HFS_MAX_META_DELAY, or the volume has been idle
 * for at least HFS_META_DELAY.
 *
 * The syncer lock is held while inspecting hfs_flags and
 * hfs_sync_req_oldest, and is dropped across the (potentially long)
 * flush itself.
 */
void hfs_syncer(void *arg, __unused wait_result_t wr)
{
	struct hfsmount *hfsmp = arg;
	struct timeval now;

	KDBG(HFSDBG_SYNCER | DBG_FUNC_START, obfuscate_addr(hfsmp));

	hfs_syncer_lock(hfsmp);

	while (ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)
		   && timerisset(&hfsmp->hfs_sync_req_oldest)) {

		/* Wait up to HFS_META_DELAY_TS for activity (presumably releasing
		   the syncer lock while asleep -- see hfs_syncer_wait). */
		hfs_syncer_wait(hfsmp, &HFS_META_DELAY_TS);

		/* Re-check after waking: shutdown may have been requested, or the
		   pending request may have been consumed. */
		if (!ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)
			|| !timerisset(&hfsmp->hfs_sync_req_oldest)) {
			break;
		}

		/* Check to see whether we should flush now: either the oldest
		   is > HFS_MAX_META_DELAY or HFS_META_DELAY has elapsed since
		   the request and there are no pending writes. */

		microuptime(&now);
		uint64_t idle_time = vfs_idle_time(hfsmp->hfs_mp);

		if (!hfs_has_elapsed(&hfsmp->hfs_sync_req_oldest, &now,
							 HFS_MAX_META_DELAY)
			&& idle_time < HFS_META_DELAY) {
			/* Neither condition met yet -- keep waiting. */
			continue;
		}

		/* We are servicing the request now, so clear its timestamp. */
		timerclear(&hfsmp->hfs_sync_req_oldest);

		/* Drop the lock across the flush; it can take a while. */
		hfs_syncer_unlock(hfsmp);

		KDBG(HFSDBG_SYNCER_TIMED | DBG_FUNC_START, obfuscate_addr(hfsmp));

		/*
		 * We intentionally do a synchronous flush (of the journal or entire volume) here.
		 * For journaled volumes, this means we wait until the metadata blocks are written
		 * to both the journal and their final locations (in the B-trees, etc.).
		 *
		 * This tends to avoid interleaving the metadata writes with other writes (for
		 * example, user data, or to the journal when a later transaction notices that
		 * an earlier transaction has finished its async writes, and then updates the
		 * journal start in the journal header). Avoiding interleaving of writes is
		 * very good for performance on simple flash devices like SD cards, thumb drives;
		 * and on devices like floppies. Since removable devices tend to be this kind of
		 * simple device, doing a synchronous flush actually improves performance in
		 * practice.
		 *
		 * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
		 * user data to be written.
		 */
		if (hfsmp->jnl) {
			hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_META);
		} else {
			hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_current());
		}

		KDBG(HFSDBG_SYNCER_TIMED | DBG_FUNC_END);

		hfs_syncer_lock(hfsmp);
	} // while (...)

	/* Advertise our exit and wake anyone waiting for it (per the note
	   below, hfs_unmount waits on this). */
	hfsmp->hfs_syncer_thread = NULL;
	hfs_syncer_unlock(hfsmp);
	hfs_syncer_wakeup(hfsmp);

	/* BE CAREFUL WHAT YOU ADD HERE: at this point hfs_unmount is free
	   to continue and therefore hfsmp might be invalid. */

	KDBG(HFSDBG_SYNCER | DBG_FUNC_END);
}
1075
1076 /*
1077 * Call into the allocator code and perform a full scan of the bitmap file.
1078 *
1079 * This allows us to TRIM unallocated ranges if needed, and also to build up
1080 * an in-memory summary table of the state of the allocated blocks.
1081 */
1082 void hfs_scan_blocks (struct hfsmount *hfsmp) {
1083 /*
1084 * Take the allocation file lock. Journal transactions will block until
1085 * we're done here.
1086 */
1087
1088 int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
1089
1090 /*
1091 * We serialize here with the HFS mount lock as we're mounting.
1092 *
1093 * The mount can only proceed once this thread has acquired the bitmap
1094 * lock, since we absolutely do not want someone else racing in and
1095 * getting the bitmap lock, doing a read/write of the bitmap file,
1096 * then us getting the bitmap lock.
1097 *
1098 * To prevent this, the mount thread takes the HFS mount mutex, starts us
1099 * up, then immediately msleeps on the scan_var variable in the mount
1100 * point as a condition variable. This serialization is safe since
1101 * if we race in and try to proceed while they're still holding the lock,
1102 * we'll block trying to acquire the global lock. Since the mount thread
1103 * acquires the HFS mutex before starting this function in a new thread,
1104 * any lock acquisition on our part must be linearizably AFTER the mount thread's.
1105 *
1106 * Note that the HFS mount mutex is always taken last, and always for only
1107 * a short time. In this case, we just take it long enough to mark the
1108 * scan-in-flight bit.
1109 */
1110 (void) hfs_lock_mount (hfsmp);
1111 hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_INFLIGHT;
1112 wakeup((caddr_t) &hfsmp->scan_var);
1113 hfs_unlock_mount (hfsmp);
1114
1115 /* Initialize the summary table */
1116 if (hfs_init_summary (hfsmp)) {
1117 printf("hfs: could not initialize summary table for %s\n", hfsmp->vcbVN);
1118 }
1119
1120 /*
1121 * ScanUnmapBlocks assumes that the bitmap lock is held when you
1122 * call the function. We don't care if there were any errors issuing unmaps.
1123 *
1124 * It will also attempt to build up the summary table for subsequent
1125 * allocator use, as configured.
1126 */
1127 (void) ScanUnmapBlocks(hfsmp);
1128
1129 (void) hfs_lock_mount (hfsmp);
1130 hfsmp->scan_var &= ~HFS_ALLOCATOR_SCAN_INFLIGHT;
1131 hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_COMPLETED;
1132 wakeup((caddr_t) &hfsmp->scan_var);
1133 hfs_unlock_mount (hfsmp);
1134
1135 buf_invalidateblks(hfsmp->hfs_allocation_vp, 0, 0, 0);
1136
1137 hfs_systemfile_unlock(hfsmp, flags);
1138
1139 }
1140
1141 /*
1142 * Common code for mount and mountroot
1143 */
1144 int
1145 hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
1146 int journal_replay_only, vfs_context_t context)
1147 {
1148 struct proc *p = vfs_context_proc(context);
1149 int retval = E_NONE;
1150 struct hfsmount *hfsmp = NULL;
1151 struct buf *bp;
1152 dev_t dev;
1153 HFSMasterDirectoryBlock *mdbp = NULL;
1154 int ronly;
1155 #if QUOTA
1156 int i;
1157 #endif
1158 int mntwrapper;
1159 kauth_cred_t cred;
1160 u_int64_t disksize;
1161 daddr64_t log_blkcnt;
1162 u_int32_t log_blksize;
1163 u_int32_t phys_blksize;
1164 u_int32_t minblksize;
1165 u_int32_t iswritable;
1166 daddr64_t mdb_offset;
1167 int isvirtual = 0;
1168 int isroot = !journal_replay_only && args == NULL;
1169 u_int32_t device_features = 0;
1170 int isssd;
1171
1172 ronly = mp && vfs_isrdonly(mp);
1173 dev = vnode_specrdev(devvp);
1174 cred = p ? vfs_context_ucred(context) : NOCRED;
1175 mntwrapper = 0;
1176
1177 bp = NULL;
1178 hfsmp = NULL;
1179 mdbp = NULL;
1180 minblksize = kHFSBlockSize;
1181
1182 /* Advisory locking should be handled at the VFS layer */
1183 if (mp)
1184 vfs_setlocklocal(mp);
1185
1186 /* Get the logical block size (treated as physical block size everywhere) */
1187 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
1188 if (HFS_MOUNT_DEBUG) {
1189 printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n");
1190 }
1191 retval = ENXIO;
1192 goto error_exit;
1193 }
1194 if (log_blksize == 0 || log_blksize > 1024*1024*1024) {
1195 printf("hfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize);
1196 retval = ENXIO;
1197 goto error_exit;
1198 }
1199
1200 /* Get the physical block size. */
1201 retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
1202 if (retval) {
1203 if ((retval != ENOTSUP) && (retval != ENOTTY)) {
1204 if (HFS_MOUNT_DEBUG) {
1205 printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n");
1206 }
1207 retval = ENXIO;
1208 goto error_exit;
1209 }
1210 /* If device does not support this ioctl, assume that physical
1211 * block size is same as logical block size
1212 */
1213 phys_blksize = log_blksize;
1214 }
1215 if (phys_blksize == 0 || phys_blksize > MAXBSIZE) {
1216 printf("hfs: physical block size 0x%x looks bad. Not mounting.\n", phys_blksize);
1217 retval = ENXIO;
1218 goto error_exit;
1219 }
1220
1221 if (phys_blksize < log_blksize) {
1222 /*
1223 * In the off chance that the phys_blksize is SMALLER than the logical
1224 * then don't let that happen. Pretend that the PHYSICALBLOCKSIZE
1225 * ioctl was not supported.
1226 */
1227 phys_blksize = log_blksize;
1228 }
1229
1230
1231 /* Switch to 512 byte sectors (temporarily) */
1232 if (log_blksize > 512) {
1233 u_int32_t size512 = 512;
1234
1235 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
1236 if (HFS_MOUNT_DEBUG) {
1237 printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n");
1238 }
1239 retval = ENXIO;
1240 goto error_exit;
1241 }
1242 }
1243 /* Get the number of 512 byte physical blocks. */
1244 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1245 /* resetting block size may fail if getting block count did */
1246 (void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
1247 if (HFS_MOUNT_DEBUG) {
1248 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n");
1249 }
1250 retval = ENXIO;
1251 goto error_exit;
1252 }
1253 /* Compute an accurate disk size (i.e. within 512 bytes) */
1254 disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;
1255
1256 /*
1257 * On Tiger it is not necessary to switch the device
1258 * block size to be 4k if there are more than 31-bits
1259 * worth of blocks but to insure compatibility with
1260 * pre-Tiger systems we have to do it.
1261 *
1262 * If the device size is not a multiple of 4K (8 * 512), then
1263 * switching the logical block size isn't going to help because
1264 * we will be unable to write the alternate volume header.
1265 * In this case, just leave the logical block size unchanged.
1266 */
1267 if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
1268 minblksize = log_blksize = 4096;
1269 if (phys_blksize < log_blksize)
1270 phys_blksize = log_blksize;
1271 }
1272
1273 /*
1274 * The cluster layer is not currently prepared to deal with a logical
1275 * block size larger than the system's page size. (It can handle
1276 * blocks per page, but not multiple pages per block.) So limit the
1277 * logical block size to the page size.
1278 */
1279 if (log_blksize > PAGE_SIZE) {
1280 log_blksize = PAGE_SIZE;
1281 }
1282
1283 /* Now switch to our preferred physical block size. */
1284 if (log_blksize > 512) {
1285 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1286 if (HFS_MOUNT_DEBUG) {
1287 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n");
1288 }
1289 retval = ENXIO;
1290 goto error_exit;
1291 }
1292 /* Get the count of physical blocks. */
1293 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1294 if (HFS_MOUNT_DEBUG) {
1295 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n");
1296 }
1297 retval = ENXIO;
1298 goto error_exit;
1299 }
1300 }
1301
1302 /*
1303 * At this point:
1304 * minblksize is the minimum physical block size
1305 * log_blksize has our preferred physical block size
1306 * log_blkcnt has the total number of physical blocks
1307 */
1308
1309 mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize);
1310
1311 if ((retval = (int)buf_meta_bread(devvp,
1312 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)),
1313 phys_blksize, cred, &bp))) {
1314 if (HFS_MOUNT_DEBUG) {
1315 printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval);
1316 }
1317 goto error_exit;
1318 }
1319 mdbp = hfs_malloc(kMDBSize);
1320 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize);
1321 buf_brelse(bp);
1322 bp = NULL;
1323
1324 hfsmp = hfs_mallocz(sizeof(struct hfsmount));
1325
1326 hfs_chashinit_finish(hfsmp);
1327
1328 /* Init the ID lookup hashtable */
1329 hfs_idhash_init (hfsmp);
1330
1331 /*
1332 * See if the disk supports unmap (trim).
1333 *
1334 * NOTE: vfs_init_io_attributes has not been called yet, so we can't use the io_flags field
1335 * returned by vfs_ioattr. We need to call VNOP_IOCTL ourselves.
1336 */
1337 if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&device_features, 0, context) == 0) {
1338 if (device_features & DK_FEATURE_UNMAP) {
1339 hfsmp->hfs_flags |= HFS_UNMAP;
1340 }
1341
1342 if(device_features & DK_FEATURE_BARRIER)
1343 hfsmp->hfs_flags |= HFS_FEATURE_BARRIER;
1344 }
1345
1346 /*
1347 * See if the disk is a solid state device, too. We need this to decide what to do about
1348 * hotfiles.
1349 */
1350 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, context) == 0) {
1351 if (isssd) {
1352 hfsmp->hfs_flags |= HFS_SSD;
1353 }
1354 }
1355
1356 /* See if the underlying device is Core Storage or not */
1357 dk_corestorage_info_t cs_info;
1358 memset(&cs_info, 0, sizeof(dk_corestorage_info_t));
1359 if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, context) == 0) {
1360 hfsmp->hfs_flags |= HFS_CS;
1361 if (isroot && (cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
1362 hfsmp->hfs_flags |= HFS_CS_METADATA_PIN;
1363 }
1364 if (isroot && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES)) {
1365 hfsmp->hfs_flags |= HFS_CS_HOTFILE_PIN;
1366 hfsmp->hfs_cs_hotfile_size = cs_info.hotfile_size;
1367 }
1368 if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_SWAPFILE)) {
1369 hfsmp->hfs_flags |= HFS_CS_SWAPFILE_PIN;
1370
1371 struct vfsioattr ioattr;
1372 vfs_ioattr(mp, &ioattr);
1373 ioattr.io_flags |= VFS_IOATTR_FLAGS_SWAPPIN_SUPPORTED;
1374 ioattr.io_max_swappin_available = cs_info.swapfile_pinning;
1375 vfs_setioattr(mp, &ioattr);
1376 }
1377 }
1378
1379 /*
1380 * Init the volume information structure
1381 */
1382
1383 lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr);
1384 lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr);
1385 lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr);
1386 lck_spin_init(&hfsmp->vcbFreeExtLock, hfs_spinlock_group, hfs_lock_attr);
1387 #if NEW_XATTR
1388 lck_spin_init(&hfsmp->hfs_xattr_io.lock, hfs_spinlock_group, hfs_lock_attr);
1389 #endif
1390
1391 if (mp)
1392 vfs_setfsprivate(mp, hfsmp);
1393 hfsmp->hfs_mp = mp; /* Make VFSTOHFS work */
1394 hfsmp->hfs_raw_dev = vnode_specrdev(devvp);
1395 hfsmp->hfs_devvp = devvp;
1396 vnode_ref(devvp); /* Hold a ref on the device, dropped when hfsmp is freed. */
1397 hfsmp->hfs_logical_block_size = log_blksize;
1398 hfsmp->hfs_logical_block_count = log_blkcnt;
1399 hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1400 hfsmp->hfs_physical_block_size = phys_blksize;
1401 hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize);
1402 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1403 if (ronly)
1404 hfsmp->hfs_flags |= HFS_READ_ONLY;
1405 if (mp && ((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS)
1406 hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
1407
1408 #if QUOTA
1409 for (i = 0; i < MAXQUOTAS; i++)
1410 dqfileinit(&hfsmp->hfs_qfiles[i]);
1411 #endif
1412
1413 if (args) {
1414 hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid;
1415 if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID;
1416 hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? UNKNOWNGID : args->hfs_gid;
1417 if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID;
1418 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
1419 if (args->hfs_mask != (mode_t)VNOVAL) {
1420 hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
1421 if (args->flags & HFSFSMNT_NOXONFILES) {
1422 hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
1423 } else {
1424 hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
1425 }
1426 } else {
1427 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1428 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1429 }
1430 if ((args->flags != (int)VNOVAL) && (args->flags & HFSFSMNT_WRAPPER))
1431 mntwrapper = 1;
1432 } else {
1433 /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1434 if (mp && ((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) {
1435 hfsmp->hfs_uid = UNKNOWNUID;
1436 hfsmp->hfs_gid = UNKNOWNGID;
1437 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
1438 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1439 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1440 }
1441 }
1442
1443 /* Find out if disk media is writable. */
1444 if (VNOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, context) == 0) {
1445 if (iswritable)
1446 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1447 else
1448 hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1449 }
1450
1451 // Reservations
1452 rl_init(&hfsmp->hfs_reserved_ranges[0]);
1453 rl_init(&hfsmp->hfs_reserved_ranges[1]);
1454
1455 // record the current time at which we're mounting this volume
1456 struct timeval tv;
1457 microtime(&tv);
1458 hfsmp->hfs_mount_time = tv.tv_sec;
1459
1460 /* Mount a standard HFS disk */
1461 if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) &&
1462 (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) {
1463 #if CONFIG_HFS_STD
1464 /* If only journal replay is requested, exit immediately */
1465 if (journal_replay_only) {
1466 retval = 0;
1467 goto error_exit;
1468 }
1469
1470 /* On 10.6 and beyond, non read-only mounts for HFS standard vols get rejected */
1471 if (vfs_isrdwr(mp)) {
1472 retval = EROFS;
1473 goto error_exit;
1474 }
1475
1476 printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n");
1477
1478 /* Treat it as if it's read-only and not writeable */
1479 hfsmp->hfs_flags |= HFS_READ_ONLY;
1480 hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1481
1482 if ((vfs_flags(mp) & MNT_ROOTFS)) {
1483 retval = EINVAL; /* Cannot root from HFS standard disks */
1484 goto error_exit;
1485 }
1486 /* HFS disks can only use 512 byte physical blocks */
1487 if (log_blksize > kHFSBlockSize) {
1488 log_blksize = kHFSBlockSize;
1489 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1490 retval = ENXIO;
1491 goto error_exit;
1492 }
1493 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1494 retval = ENXIO;
1495 goto error_exit;
1496 }
1497 hfsmp->hfs_logical_block_size = log_blksize;
1498 hfsmp->hfs_logical_block_count = log_blkcnt;
1499 hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1500 hfsmp->hfs_physical_block_size = log_blksize;
1501 hfsmp->hfs_log_per_phys = 1;
1502 }
1503 if (args) {
1504 hfsmp->hfs_encoding = args->hfs_encoding;
1505 HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding;
1506
1507 /* establish the timezone */
1508 gTimeZone = args->hfs_timezone;
1509 }
1510
1511 retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode,
1512 &hfsmp->hfs_get_hfsname);
1513 if (retval)
1514 goto error_exit;
1515
1516 retval = hfs_MountHFSVolume(hfsmp, mdbp, p);
1517 if (retval)
1518 (void) hfs_relconverter(hfsmp->hfs_encoding);
1519 #else
1520 /* On platforms where HFS Standard is not supported, deny the mount altogether */
1521 retval = EINVAL;
1522 goto error_exit;
1523 #endif
1524
1525 }
1526 else { /* Mount an HFS Plus disk */
1527 HFSPlusVolumeHeader *vhp;
1528 off_t embeddedOffset;
1529 int jnl_disable = 0;
1530
1531 /* Get the embedded Volume Header */
1532 if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
1533 embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize;
1534 embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) *
1535 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1536
1537 /*
1538 * Cooperative Fusion is not allowed on embedded HFS+
1539 * filesystems (HFS+ inside HFS standard wrapper)
1540 */
1541 hfsmp->hfs_flags &= ~HFS_CS_METADATA_PIN;
1542
1543 /*
1544 * If the embedded volume doesn't start on a block
1545 * boundary, then switch the device to a 512-byte
1546 * block size so everything will line up on a block
1547 * boundary.
1548 */
1549 if ((embeddedOffset % log_blksize) != 0) {
1550 printf("hfs_mountfs: embedded volume offset not"
1551 " a multiple of physical block size (%d);"
1552 " switching to 512\n", log_blksize);
1553 log_blksize = 512;
1554 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE,
1555 (caddr_t)&log_blksize, FWRITE, context)) {
1556
1557 if (HFS_MOUNT_DEBUG) {
1558 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n");
1559 }
1560 retval = ENXIO;
1561 goto error_exit;
1562 }
1563 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT,
1564 (caddr_t)&log_blkcnt, 0, context)) {
1565 if (HFS_MOUNT_DEBUG) {
1566 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n");
1567 }
1568 retval = ENXIO;
1569 goto error_exit;
1570 }
1571 /* Note: relative block count adjustment */
1572 hfsmp->hfs_logical_block_count *=
1573 hfsmp->hfs_logical_block_size / log_blksize;
1574
1575 /* Update logical /physical block size */
1576 hfsmp->hfs_logical_block_size = log_blksize;
1577 hfsmp->hfs_physical_block_size = log_blksize;
1578
1579 phys_blksize = log_blksize;
1580 hfsmp->hfs_log_per_phys = 1;
1581 }
1582
1583 disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) *
1584 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1585
1586 hfsmp->hfs_logical_block_count = disksize / log_blksize;
1587
1588 hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1589
1590 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1591
1592 if (bp) {
1593 buf_markinvalid(bp);
1594 buf_brelse(bp);
1595 bp = NULL;
1596 }
1597 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1598 phys_blksize, cred, &bp);
1599 if (retval) {
1600 if (HFS_MOUNT_DEBUG) {
1601 printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval);
1602 }
1603 goto error_exit;
1604 }
1605 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512);
1606 buf_brelse(bp);
1607 bp = NULL;
1608 vhp = (HFSPlusVolumeHeader*) mdbp;
1609
1610 }
1611 else { /* pure HFS+ */
1612 embeddedOffset = 0;
1613 vhp = (HFSPlusVolumeHeader*) mdbp;
1614 }
1615
1616 retval = hfs_ValidateHFSPlusVolumeHeader(hfsmp, vhp);
1617 if (retval)
1618 goto error_exit;
1619
1620 /*
1621 * If allocation block size is less than the physical block size,
1622 * invalidate the buffer read in using native physical block size
1623 * to ensure data consistency.
1624 *
1625 * HFS Plus reserves one allocation block for the Volume Header.
1626 * If the physical size is larger, then when we read the volume header,
1627 * we will also end up reading in the next allocation block(s).
1628 * If those other allocation block(s) is/are modified, and then the volume
1629 * header is modified, the write of the volume header's buffer will write
1630 * out the old contents of the other allocation blocks.
1631 *
1632 * We assume that the physical block size is same as logical block size.
1633 * The physical block size value is used to round down the offsets for
1634 * reading and writing the primary and alternate volume headers.
1635 *
1636 * The same logic is also in hfs_MountHFSPlusVolume to ensure that
1637 * hfs_mountfs, hfs_MountHFSPlusVolume and later are doing the I/Os
1638 * using same block size.
1639 */
1640 if (SWAP_BE32(vhp->blockSize) < hfsmp->hfs_physical_block_size) {
1641 phys_blksize = hfsmp->hfs_logical_block_size;
1642 hfsmp->hfs_physical_block_size = hfsmp->hfs_logical_block_size;
1643 hfsmp->hfs_log_per_phys = 1;
1644 // There should be one bp associated with devvp in buffer cache.
1645 retval = buf_invalidateblks(devvp, 0, 0, 0);
1646 if (retval)
1647 goto error_exit;
1648 }
1649
1650 if (isroot && ((SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) != 0)) {
1651 vfs_set_root_unmounted_cleanly();
1652 }
1653
1654 /*
1655 * On inconsistent disks, do not allow read-write mount
1656 * unless it is the boot volume being mounted. We also
1657 * always want to replay the journal if the journal_replay_only
1658 * flag is set because that will (most likely) get the
1659 * disk into a consistent state before fsck_hfs starts
1660 * looking at it.
1661 */
1662 if (!journal_replay_only
1663 && !(vfs_flags(mp) & MNT_ROOTFS)
1664 && (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask)
1665 && !(hfsmp->hfs_flags & HFS_READ_ONLY)) {
1666
1667 if (HFS_MOUNT_DEBUG) {
1668 printf("hfs_mountfs: failed to mount non-root inconsistent disk\n");
1669 }
1670 retval = EINVAL;
1671 goto error_exit;
1672 }
1673
1674
1675 // XXXdbg
1676 //
1677 hfsmp->jnl = NULL;
1678 hfsmp->jvp = NULL;
1679 if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) &&
1680 args->journal_disable) {
1681 jnl_disable = 1;
1682 }
1683
1684 //
1685 // We only initialize the journal here if the last person
1686 // to mount this volume was journaling aware. Otherwise
1687 // we delay journal initialization until later at the end
1688 // of hfs_MountHFSPlusVolume() because the last person who
1689 // mounted it could have messed things up behind our back
1690 // (so we need to go find the .journal file, make sure it's
1691 // the right size, re-sync up if it was moved, etc).
1692 //
1693 if ( (SWAP_BE32(vhp->lastMountedVersion) == kHFSJMountVersion)
1694 && (SWAP_BE32(vhp->attributes) & kHFSVolumeJournaledMask)
1695 && !jnl_disable) {
1696
1697 // if we're able to init the journal, mark the mount
1698 // point as journaled.
1699 //
1700 if ((retval = hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred)) == 0) {
1701 if (mp)
1702 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1703 } else {
1704 if (retval == EROFS) {
1705 // EROFS is a special error code that means the volume has an external
1706 // journal which we couldn't find. in that case we do not want to
1707 // rewrite the volume header - we'll just refuse to mount the volume.
1708 if (HFS_MOUNT_DEBUG) {
1709 printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n");
1710 }
1711 retval = EINVAL;
1712 goto error_exit;
1713 }
1714
1715 // if the journal failed to open, then set the lastMountedVersion
1716 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1717 // of just bailing out because the volume is journaled.
1718 if (!ronly) {
1719 if (HFS_MOUNT_DEBUG) {
1720 printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n");
1721 }
1722
1723 HFSPlusVolumeHeader *jvhp;
1724
1725 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1726
1727 if (mdb_offset == 0) {
1728 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1729 }
1730
1731 bp = NULL;
1732 retval = (int)buf_meta_bread(devvp,
1733 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1734 phys_blksize, cred, &bp);
1735 if (retval == 0) {
1736 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1737
1738 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1739 printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n");
1740 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1741 buf_bwrite(bp);
1742 } else {
1743 buf_brelse(bp);
1744 }
1745 bp = NULL;
1746 } else if (bp) {
1747 buf_brelse(bp);
1748 // clear this so the error exit path won't try to use it
1749 bp = NULL;
1750 }
1751 }
1752
1753 // if this isn't the root device just bail out.
1754 // If it is the root device we just continue on
1755 // in the hopes that fsck_hfs will be able to
1756 // fix any damage that exists on the volume.
1757 if (mp && !(vfs_flags(mp) & MNT_ROOTFS)) {
1758 if (HFS_MOUNT_DEBUG) {
1759 printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n");
1760 }
1761 retval = EINVAL;
1762 goto error_exit;
1763 }
1764 }
1765 }
1766
1767 /* Either the journal is replayed successfully, or there
1768 * was nothing to replay, or no journal exists. In any case,
1769 * return success.
1770 */
1771 if (journal_replay_only) {
1772 retval = 0;
1773 goto error_exit;
1774 }
1775
1776 #if CONFIG_HFS_STD
1777 (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
1778 #endif
1779
1780 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1781 /*
1782 * If the backend didn't like our physical blocksize
1783 * then retry with physical blocksize of 512.
1784 */
1785 if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) {
1786 printf("hfs_mountfs: could not use physical block size "
1787 "(%d) switching to 512\n", log_blksize);
1788 log_blksize = 512;
1789 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1790 if (HFS_MOUNT_DEBUG) {
1791 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n");
1792 }
1793 retval = ENXIO;
1794 goto error_exit;
1795 }
1796 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1797 if (HFS_MOUNT_DEBUG) {
1798 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n");
1799 }
1800 retval = ENXIO;
1801 goto error_exit;
1802 }
1803 set_fsblocksize(devvp);
1804 /* Note: relative block count adjustment (in case this is an embedded volume). */
1805 hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize;
1806 hfsmp->hfs_logical_block_size = log_blksize;
1807 hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize;
1808
1809 hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1810
1811 if (hfsmp->jnl && hfsmp->jvp == devvp) {
1812 // close and re-open this with the new block size
1813 journal_close(hfsmp->jnl);
1814 hfsmp->jnl = NULL;
1815 if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1816 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1817 } else {
1818 // if the journal failed to open, then set the lastMountedVersion
1819 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1820 // of just bailing out because the volume is journaled.
1821 if (!ronly) {
1822 if (HFS_MOUNT_DEBUG) {
1823 printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n");
1824 }
1825 HFSPlusVolumeHeader *jvhp;
1826
1827 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1828
1829 if (mdb_offset == 0) {
1830 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1831 }
1832
1833 bp = NULL;
1834 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1835 phys_blksize, cred, &bp);
1836 if (retval == 0) {
1837 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1838
1839 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1840 printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n");
1841 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1842 buf_bwrite(bp);
1843 } else {
1844 buf_brelse(bp);
1845 }
1846 bp = NULL;
1847 } else if (bp) {
1848 buf_brelse(bp);
1849 // clear this so the error exit path won't try to use it
1850 bp = NULL;
1851 }
1852 }
1853
1854 // if this isn't the root device just bail out.
1855 // If it is the root device we just continue on
1856 // in the hopes that fsck_hfs will be able to
1857 // fix any damage that exists on the volume.
1858 if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1859 if (HFS_MOUNT_DEBUG) {
1860 printf("hfs_mountfs: hfs_early_journal_init (2) failed \n");
1861 }
1862 retval = EINVAL;
1863 goto error_exit;
1864 }
1865 }
1866 }
1867
1868 /* Try again with a smaller block size... */
1869 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1870 if (retval && HFS_MOUNT_DEBUG) {
1871 printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval);
1872 }
1873 }
1874 #if CONFIG_HFS_STD
1875 if (retval)
1876 (void) hfs_relconverter(0);
1877 #endif
1878 }
1879
1880 // save off a snapshot of the mtime from the previous mount
1881 // (for matador).
1882 hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime;
1883
1884 if ( retval ) {
1885 if (HFS_MOUNT_DEBUG) {
1886 printf("hfs_mountfs: encountered failure %d \n", retval);
1887 }
1888 goto error_exit;
1889 }
1890
1891 struct vfsstatfs *vsfs = vfs_statfs(mp);
1892 vsfs->f_fsid.val[0] = dev;
1893 vsfs->f_fsid.val[1] = vfs_typenum(mp);
1894
1895 vfs_setmaxsymlen(mp, 0);
1896
1897 #if CONFIG_HFS_STD
1898 if (ISSET(hfsmp->hfs_flags, HFS_STANDARD)) {
1899 /* HFS standard doesn't support extended readdir! */
1900 mount_set_noreaddirext (mp);
1901 }
1902 #endif
1903
1904 if (args) {
1905 /*
1906 * Set the free space warning levels for a non-root volume:
1907 *
1908 * Set the "danger" limit to 1% of the volume size or 150MB, whichever is less.
1909 * Set the "warning" limit to 2% of the volume size or 500MB, whichever is less.
1910 * Set the "near warning" limit to 10% of the volume size or 1GB, whichever is less.
1911 * And last, set the "desired" freespace level to to 12% of the volume size or 1.2GB,
1912 * whichever is less.
1913 */
1914 hfsmp->hfs_freespace_notify_dangerlimit =
1915 MIN(HFS_VERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1916 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_VERYLOWDISKTRIGGERFRACTION);
1917 hfsmp->hfs_freespace_notify_warninglimit =
1918 MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1919 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION);
1920 hfsmp->hfs_freespace_notify_nearwarninglimit =
1921 MIN(HFS_NEARLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1922 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_NEARLOWDISKTRIGGERFRACTION);
1923 hfsmp->hfs_freespace_notify_desiredlevel =
1924 MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1925 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION);
1926 } else {
1927 /*
1928 * Set the free space warning levels for the root volume:
1929 *
1930 * Set the "danger" limit to 5% of the volume size or 512MB, whichever is less.
1931 * Set the "warning" limit to 10% of the volume size or 1GB, whichever is less.
1932 * Set the "near warning" limit to 10.5% of the volume size or 1.1GB, whichever is less.
1933 * And last, set the "desired" freespace level to to 11% of the volume size or 1.25GB,
1934 * whichever is less.
1935 *
1936 * NOTE: While those are the default limits, KernelEventAgent (as of 3/2016)
1937 * will unilaterally override these to the following on OSX only:
1938 * Danger: 3GB
1939 * Warning: Min (2% of root volume, 10GB), with a floor of 10GB
1940 * Desired: Warning Threshold + 1.5GB
1941 */
1942 hfsmp->hfs_freespace_notify_dangerlimit =
1943 MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1944 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION);
1945 hfsmp->hfs_freespace_notify_warninglimit =
1946 MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1947 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION);
1948 hfsmp->hfs_freespace_notify_nearwarninglimit =
1949 MIN(HFS_ROOTNEARLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1950 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTNEARLOWDISKTRIGGERFRACTION);
1951 hfsmp->hfs_freespace_notify_desiredlevel =
1952 MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1953 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION);
1954 };
1955
1956 /* Check if the file system exists on virtual device, like disk image */
1957 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) {
1958 if (isvirtual) {
1959 hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE;
1960 }
1961 }
1962
1963 if (!isroot
1964 && !ISSET(hfsmp->hfs_flags, HFS_VIRTUAL_DEVICE)
1965 && hfs_is_ejectable(vfs_statfs(mp)->f_mntfromname)) {
1966 SET(hfsmp->hfs_flags, HFS_RUN_SYNCER);
1967 }
1968
1969 const char *dev_name = (hfsmp->hfs_devvp
1970 ? vnode_getname_printable(hfsmp->hfs_devvp) : NULL);
1971
1972 printf("hfs: mounted %s on device %s\n",
1973 (hfsmp->vcbVN[0] ? (const char*) hfsmp->vcbVN : "unknown"),
1974 dev_name ?: "unknown device");
1975
1976 if (dev_name)
1977 vnode_putname_printable(dev_name);
1978
1979 /*
1980 * Start looking for free space to drop below this level and generate a
1981 * warning immediately if needed:
1982 */
1983 hfsmp->hfs_notification_conditions = 0;
1984 hfs_generate_volume_notifications(hfsmp);
1985
1986 if (ronly == 0) {
1987 (void) hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
1988 }
1989 hfs_free(mdbp, kMDBSize);
1990 return (0);
1991
1992 error_exit:
1993 if (bp)
1994 buf_brelse(bp);
1995
1996 hfs_free(mdbp, kMDBSize);
1997
1998 hfs_close_jvp(hfsmp);
1999
2000 if (hfsmp) {
2001 if (hfsmp->hfs_devvp) {
2002 vnode_rele(hfsmp->hfs_devvp);
2003 }
2004 hfs_locks_destroy(hfsmp);
2005 hfs_delete_chash(hfsmp);
2006 hfs_idhash_destroy (hfsmp);
2007
2008 hfs_free(hfsmp, sizeof(*hfsmp));
2009 if (mp)
2010 vfs_setfsprivate(mp, NULL);
2011 }
2012 return (retval);
2013 }
2014
2015
2016 /*
2017 * Make a filesystem operational.
2018 * Nothing to do at the moment.
2019 */
2020 /* ARGSUSED */
2021 static int
2022 hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context)
2023 {
2024 return (0);
2025 }
2026
2027
2028 /*
2029 * unmount system call
2030 */
int
hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp = VFSTOHFS(mp);
	int retval = E_NONE;
	int flags;
	int force;
	int started_tr = 0;

	/*
	 * MNT_FORCE means we keep tearing down even if individual flush or
	 * fsync steps fail; FORCECLOSE is passed down to close open files.
	 */
	flags = 0;
	force = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		force = 1;
	}

	/* Log volume name and backing device to aid unmount triage. */
	const char *dev_name = (hfsmp->hfs_devvp
		? vnode_getname_printable(hfsmp->hfs_devvp) : NULL);

	printf("hfs: unmount initiated on %s on device %s\n",
		(hfsmp->vcbVN[0] ? (const char*) hfsmp->vcbVN : "unknown"),
		dev_name ?: "unknown device");

	if (dev_name)
		vnode_putname_printable(dev_name);

	/* Push out all user files first; bail on failure unless forced. */
	if ((retval = hfs_flushfiles(mp, flags, p)) && !force)
		return (retval);

	if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
		(void) hfs_recording_suspend(hfsmp);

	/* Stop and release the background syncer state for this mount. */
	hfs_syncer_free(hfsmp);

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		if (hfsmp->hfs_summary_table) {
			int err = 0;
			/*
			 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
			 */
			if (hfsmp->hfs_allocation_vp) {
				err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			}
			hfs_free(hfsmp->hfs_summary_table, hfsmp->hfs_summary_bytes);
			hfsmp->hfs_summary_table = NULL;
			hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;

			/* Only unlock if the lock above was actually acquired. */
			if (err == 0 && hfsmp->hfs_allocation_vp){
				hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
			}

		}
	}

	/*
	 * Flush out the b-trees, volume bitmap and Volume Header
	 */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
		retval = hfs_start_transaction(hfsmp);
		if (retval == 0) {
			started_tr = 1;
		} else if (!force) {
			goto err_exit;
		}

		/* Startup file is optional; sync it only if present. */
		if (hfsmp->hfs_startup_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_startup_vp));
			if (retval && !force)
				goto err_exit;
		}

		/* Attributes b-tree is optional; sync it only if present. */
		if (hfsmp->hfs_attribute_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_attribute_vp));
			if (retval && !force)
				goto err_exit;
		}

		/* Catalog and extents b-trees always exist on a mounted volume. */
		(void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
		if (retval && !force)
			goto err_exit;

		(void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
		if (retval && !force)
			goto err_exit;

		/* Allocation (bitmap) file is absent on standard HFS. */
		if (hfsmp->hfs_allocation_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
			if (retval && !force)
				goto err_exit;
		}

		/* Hot-files tracking file, if it is an active system vnode. */
		if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) {
			retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p);
			if (retval && !force)
				goto err_exit;
		}

		/* If runtime corruption was detected, indicate that the volume
		 * was not unmounted cleanly.
		 */
		if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
		} else {
			HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask;
		}

		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
			int i;
			u_int32_t min_start = hfsmp->totalBlocks;

			// set the nextAllocation pointer to the smallest free block number
			// we've seen so on the next mount we won't rescan unnecessarily
			lck_spin_lock(&hfsmp->vcbFreeExtLock);
			for(i=0; i < (int)hfsmp->vcbFreeExtCnt; i++) {
				if (hfsmp->vcbFreeExt[i].startBlock < min_start) {
					min_start = hfsmp->vcbFreeExt[i].startBlock;
				}
			}
			lck_spin_unlock(&hfsmp->vcbFreeExtLock);
			if (min_start < hfsmp->nextAllocation) {
				hfsmp->nextAllocation = min_start;
			}
		}

		retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
		if (retval) {
			/* Flush failed: withdraw the clean-unmount claim. */
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
			if (!force)
				goto err_exit;	/* could not flush everything */
		}

		if (started_tr) {
			hfs_end_transaction(hfsmp);
			started_tr = 0;
		}
	}

	if (hfsmp->jnl) {
		hfs_flush(hfsmp, HFS_FLUSH_FULL);
	}

	/*
	 * Invalidate our caches and release metadata vnodes
	 */
	(void) hfsUnmount(hfsmp, p);

#if CONFIG_HFS_STD
	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
		(void) hfs_relconverter(hfsmp->hfs_encoding);
	}
#endif

	// XXXdbg
	if (hfsmp->jnl) {
		journal_close(hfsmp->jnl);
		hfsmp->jnl = NULL;
	}

	VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);

	hfs_close_jvp(hfsmp);

	/*
	 * Last chance to dump unreferenced system files.
	 */
	(void) vflush(mp, NULLVP, FORCECLOSE);

#if HFS_SPARSE_DEV
	/* Drop our reference on the backing fs (if any). */
	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingvp) {
		struct vnode * tmpvp;

		hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
		tmpvp = hfsmp->hfs_backingvp;
		hfsmp->hfs_backingvp = NULLVP;
		vnode_rele(tmpvp);
	}
#endif /* HFS_SPARSE_DEV */

	vnode_rele(hfsmp->hfs_devvp);

	/* Tear down per-mount locks and hash tables before freeing hfsmp. */
	hfs_locks_destroy(hfsmp);
	hfs_delete_chash(hfsmp);
	hfs_idhash_destroy(hfsmp);

	hfs_assert(TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS])
			   && TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_LOCKED_BLOCKS]));
	hfs_assert(!hfsmp->lockedBlocks);

	hfs_free(hfsmp, sizeof(*hfsmp));

	// decrement kext retain count
#if TARGET_OS_OSX
	OSDecrementAtomic(&hfs_active_mounts);
	OSKextReleaseKextWithLoadTag(OSKextGetCurrentLoadTag());
#endif

#if HFS_LEAK_DEBUG && TARGET_OS_OSX
	if (hfs_active_mounts == 0) {
		if (hfs_dump_allocations())
			Debugger(NULL);
		else {
			printf("hfs: last unmount and nothing was leaked!\n");
			msleep(hfs_unmount, NULL, PINOD, "hfs_unmount",
				   &(struct timespec){ 5, 0 });
		}
	}
#endif

	return (0);

err_exit:
	/* Failure path: roll back an open transaction before returning. */
	if (started_tr) {
		hfs_end_transaction(hfsmp);
	}
	return retval;
}
2259
2260
2261 /*
2262 * Return the root of a filesystem.
2263 */
2264 int hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context)
2265 {
2266 return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0);
2267 }
2268
2269
2270 /*
2271 * Do operations associated with quotas
2272 */
2273 #if !QUOTA
2274 static int
2275 hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context)
2276 {
2277 return (ENOTSUP);
2278 }
2279 #else
static int
hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context)
{
	struct proc *p = vfs_context_proc(context);
	int cmd, type, error;

	/* uid of ~0U is shorthand for "the calling user". */
	if (uid == ~0U)
		uid = kauth_cred_getuid(vfs_context_ucred(context));
	cmd = cmds >> SUBCMDSHIFT;

	/*
	 * Permission check: anyone may sync quotas or query quota status,
	 * and a user may fetch their own quota; all other commands require
	 * superuser privileges.
	 */
	switch (cmd) {
	case Q_SYNC:
	case Q_QUOTASTAT:
		break;
	case Q_GETQUOTA:
		if (uid == kauth_cred_getuid(vfs_context_ucred(context)))
			break;
		/* fall through */
	default:
		if ( (error = vfs_context_suser(context)) )
			return (error);
	}

	/* Low bits select the quota type (user/group); validate range. */
	type = cmds & SUBCMDMASK;
	if ((u_int)type >= MAXQUOTAS)
		return (EINVAL);
	/* Busy the mount so it cannot be unmounted while we operate on it. */
	if ((error = vfs_busy(mp, LK_NOWAIT)) != 0)
		return (error);

	switch (cmd) {

	case Q_QUOTAON:
		error = hfs_quotaon(p, mp, type, datap);
		break;

	case Q_QUOTAOFF:
		error = hfs_quotaoff(p, mp, type);
		break;

	case Q_SETQUOTA:
		error = hfs_setquota(mp, uid, type, datap);
		break;

	case Q_SETUSE:
		error = hfs_setuse(mp, uid, type, datap);
		break;

	case Q_GETQUOTA:
		error = hfs_getquota(mp, uid, type, datap);
		break;

	case Q_SYNC:
		error = hfs_qsync(mp);
		break;

	case Q_QUOTASTAT:
		error = hfs_quotastat(mp, type, datap);
		break;

	default:
		error = EINVAL;
		break;
	}
	vfs_unbusy(mp);

	return (error);
}
2347 #endif /* QUOTA */
2348
/* Subtype is composite of bits (reported to callers via f_fssubtype) */
#define HFS_SUBTYPE_JOURNALED		0x01	/* volume has an active journal */
#define HFS_SUBTYPE_CASESENSITIVE	0x02	/* HFSX case-sensitive variant */
/* bits 2 - 6 reserved */
#define HFS_SUBTYPE_STANDARDHFS		0x80	/* plain (pre-HFS+) HFS volume */
2354
2355 /*
2356 * Get file system statistics.
2357 */
2358 int
2359 hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context)
2360 {
2361 ExtendedVCB *vcb = VFSTOVCB(mp);
2362 struct hfsmount *hfsmp = VFSTOHFS(mp);
2363 u_int16_t subtype = 0;
2364
2365 sbp->f_bsize = (u_int32_t)vcb->blockSize;
2366 sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
2367 sbp->f_blocks = (u_int64_t)((u_int32_t)vcb->totalBlocks);
2368 sbp->f_bfree = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 0));
2369 sbp->f_bavail = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 1));
2370 sbp->f_files = (u_int64_t)HFS_MAX_FILES;
2371 sbp->f_ffree = (u_int64_t)hfs_free_cnids(hfsmp);
2372
2373 /*
2374 * Subtypes (flavors) for HFS
2375 * 0: Mac OS Extended
2376 * 1: Mac OS Extended (Journaled)
2377 * 2: Mac OS Extended (Case Sensitive)
2378 * 3: Mac OS Extended (Case Sensitive, Journaled)
2379 * 4 - 127: Reserved
2380 * 128: Mac OS Standard
2381 *
2382 */
2383 if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
2384 /* HFS+ & variants */
2385 if (hfsmp->jnl) {
2386 subtype |= HFS_SUBTYPE_JOURNALED;
2387 }
2388 if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
2389 subtype |= HFS_SUBTYPE_CASESENSITIVE;
2390 }
2391 }
2392 #if CONFIG_HFS_STD
2393 else {
2394 /* HFS standard */
2395 subtype = HFS_SUBTYPE_STANDARDHFS;
2396 }
2397 #endif
2398 sbp->f_fssubtype = subtype;
2399
2400 return (0);
2401 }
2402
2403
2404 //
2405 // XXXdbg -- this is a callback to be used by the journal to
2406 // get meta data blocks flushed out to disk.
2407 //
2408 // XXXdbg -- be smarter and don't flush *every* block on each
2409 // call. try to only flush some so we don't wind up
2410 // being too synchronous.
2411 //
void
hfs_sync_metadata(void *arg)
{
	struct mount *mp = (struct mount *)arg;
	struct hfsmount *hfsmp;
	ExtendedVCB *vcb;
	buf_t bp;
	int retval;
	daddr64_t priIDSector;
	hfsmp = VFSTOHFS(mp);
	vcb = HFSTOVCB(hfsmp);

	// now make sure the super block is flushed
	/* Logical sector of the primary volume header (past any embedding). */
	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
							  HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));

	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
								 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
								 hfsmp->hfs_physical_block_size, NOCRED, &bp);
	/* ENXIO (device gone, e.g. forced eject) is expected; don't log it. */
	if ((retval != 0 ) && (retval != ENXIO)) {
		printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
			   (int)priIDSector, retval);
	}

	/* Write only if the buffer is delayed-write and not locked. */
	if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
		buf_bwrite(bp);
	} else if (bp) {
		buf_brelse(bp);
	}

	/* Note that these I/Os bypass the journal (no calls to journal_start_modify_block) */

	// the alternate super block...
	// XXXdbg - we probably don't need to do this each and every time.
	//          hfs_btreeio.c:FlushAlternate() should flag when it was
	//          written...
	if (hfsmp->hfs_partition_avh_sector) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
									 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
									 hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
			/*
			 * note this I/O can fail if the partition shrank behind our backs!
			 * So failure should be OK here.
			 */
			buf_bwrite(bp);
		} else if (bp) {
			buf_brelse(bp);
		}
	}

	/* Is the FS's idea of the AVH different than the partition ? */
	if ((hfsmp->hfs_fs_avh_sector) && (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
									 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
									 hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
			buf_bwrite(bp);
		} else if (bp) {
			buf_brelse(bp);
		}
	}

}
2476
2477
/* Per-pass state shared between hfs_sync() and hfs_sync_callback(). */
struct hfs_sync_cargs {
	kauth_cred_t cred;		/* credential of the caller initiating the sync */
	struct proc *p;			/* calling process, forwarded to hfs_fsync */
	int waitfor;			/* MNT_WAIT / MNT_NOWAIT sync mode */
	int error;			/* most recent error reported by the callback */
	int atime_only_syncs;		/* count of atime-only cnodes synced (capped) */
	time_t sync_start_time;		/* time at which this sync pass began */
};
2486
2487
2488 static int
2489 hfs_sync_callback(struct vnode *vp, void *cargs)
2490 {
2491 struct cnode *cp = VTOC(vp);
2492 struct hfs_sync_cargs *args;
2493 int error;
2494
2495 args = (struct hfs_sync_cargs *)cargs;
2496
2497 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
2498 return (VNODE_RETURNED);
2499 }
2500
2501 hfs_dirty_t dirty_state = hfs_is_dirty(cp);
2502
2503 bool sync = dirty_state == HFS_DIRTY || vnode_hasdirtyblks(vp);
2504
2505 if (!sync && dirty_state == HFS_DIRTY_ATIME
2506 && args->atime_only_syncs < 256) {
2507 // We only update if the atime changed more than 60s ago
2508 if (args->sync_start_time - cp->c_attr.ca_atime > 60) {
2509 sync = true;
2510 ++args->atime_only_syncs;
2511 }
2512 }
2513
2514 if (sync) {
2515 error = hfs_fsync(vp, args->waitfor, 0, args->p);
2516
2517 if (error)
2518 args->error = error;
2519 } else if (cp->c_touch_acctime)
2520 hfs_touchtimes(VTOHFS(vp), cp);
2521
2522 hfs_unlock(cp);
2523 return (VNODE_RETURNED);
2524 }
2525
2526
2527
2528 /*
2529 * Go through the disk queues to initiate sandbagged IO;
2530 * go through the inodes to write those that have been modified;
2531 * initiate the writing of the super block if it has been modified.
2532 *
2533 * Note: we are always called with the filesystem marked `MPBUSY'.
2534 */
2535 int
2536 hfs_sync(struct mount *mp, int waitfor, vfs_context_t context)
2537 {
2538 struct proc *p = vfs_context_proc(context);
2539 struct cnode *cp;
2540 struct hfsmount *hfsmp;
2541 ExtendedVCB *vcb;
2542 struct vnode *meta_vp[4];
2543 int i;
2544 int error, allerror = 0;
2545 struct hfs_sync_cargs args;
2546
2547 hfsmp = VFSTOHFS(mp);
2548
2549 // Back off if hfs_changefs or a freeze is underway
2550 hfs_lock_mount(hfsmp);
2551 if ((hfsmp->hfs_flags & HFS_IN_CHANGEFS)
2552 || hfsmp->hfs_freeze_state != HFS_THAWED) {
2553 hfs_unlock_mount(hfsmp);
2554 return 0;
2555 }
2556
2557 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2558 hfs_unlock_mount(hfsmp);
2559 return (EROFS);
2560 }
2561
2562 ++hfsmp->hfs_syncers;
2563 hfs_unlock_mount(hfsmp);
2564
2565 args.cred = kauth_cred_get();
2566 args.waitfor = waitfor;
2567 args.p = p;
2568 args.error = 0;
2569 args.atime_only_syncs = 0;
2570
2571 struct timeval tv;
2572 microtime(&tv);
2573
2574 args.sync_start_time = tv.tv_sec;
2575
2576 /*
2577 * hfs_sync_callback will be called for each vnode
2578 * hung off of this mount point... the vnode will be
2579 * properly referenced and unreferenced around the callback
2580 */
2581 vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args);
2582
2583 if (args.error)
2584 allerror = args.error;
2585
2586 vcb = HFSTOVCB(hfsmp);
2587
2588 meta_vp[0] = vcb->extentsRefNum;
2589 meta_vp[1] = vcb->catalogRefNum;
2590 meta_vp[2] = vcb->allocationsRefNum; /* This is NULL for standard HFS */
2591 meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */
2592
2593 /* Now sync our three metadata files */
2594 for (i = 0; i < 4; ++i) {
2595 struct vnode *btvp;
2596
2597 btvp = meta_vp[i];;
2598 if ((btvp==0) || (vnode_mount(btvp) != mp))
2599 continue;
2600
2601 /* XXX use hfs_systemfile_lock instead ? */
2602 (void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2603 cp = VTOC(btvp);
2604
2605 if (!hfs_is_dirty(cp) && !vnode_hasdirtyblks(btvp)) {
2606 hfs_unlock(VTOC(btvp));
2607 continue;
2608 }
2609 error = vnode_get(btvp);
2610 if (error) {
2611 hfs_unlock(VTOC(btvp));
2612 continue;
2613 }
2614 if ((error = hfs_fsync(btvp, waitfor, 0, p)))
2615 allerror = error;
2616
2617 hfs_unlock(cp);
2618 vnode_put(btvp);
2619 };
2620
2621
2622 #if CONFIG_HFS_STD
2623 /*
2624 * Force stale file system control information to be flushed.
2625 */
2626 if (vcb->vcbSigWord == kHFSSigWord) {
2627 if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) {
2628 allerror = error;
2629 }
2630 }
2631 #endif
2632
2633 #if QUOTA
2634 hfs_qsync(mp);
2635 #endif /* QUOTA */
2636
2637 hfs_hotfilesync(hfsmp, vfs_context_kernel());
2638
2639 /*
2640 * Write back modified superblock.
2641 */
2642 if (IsVCBDirty(vcb)) {
2643 error = hfs_flushvolumeheader(hfsmp, waitfor == MNT_WAIT ? HFS_FVH_WAIT : 0);
2644 if (error)
2645 allerror = error;
2646 }
2647
2648 if (hfsmp->jnl) {
2649 hfs_flush(hfsmp, HFS_FLUSH_JOURNAL);
2650 }
2651
2652 hfs_lock_mount(hfsmp);
2653 boolean_t wake = (!--hfsmp->hfs_syncers
2654 && hfsmp->hfs_freeze_state == HFS_WANT_TO_FREEZE);
2655 hfs_unlock_mount(hfsmp);
2656 if (wake)
2657 wakeup(&hfsmp->hfs_freeze_state);
2658
2659 return (allerror);
2660 }
2661
2662
2663 /*
2664 * File handle to vnode
2665 *
2666 * Have to be really careful about stale file handles:
2667 * - check that the cnode id is valid
2668 * - call hfs_vget() to get the locked cnode
2669 * - check for an unallocated cnode (i_mode == 0)
2670 * - check that the given client host has export rights and return
2671 * those rights via. exflagsp and credanonp
2672 */
2673 static int
2674 hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context)
2675 {
2676 struct hfsfid *hfsfhp;
2677 struct vnode *nvp;
2678 int result;
2679
2680 *vpp = NULL;
2681 hfsfhp = (struct hfsfid *)fhp;
2682
2683 if (fhlen < (int)sizeof(struct hfsfid))
2684 return (EINVAL);
2685
2686 result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0, 0);
2687 if (result) {
2688 if (result == ENOENT)
2689 result = ESTALE;
2690 return result;
2691 }
2692
2693 /*
2694 * We used to use the create time as the gen id of the file handle,
2695 * but it is not static enough because it can change at any point
2696 * via system calls. We still don't have another volume ID or other
2697 * unique identifier to use for a generation ID across reboots that
2698 * persists until the file is removed. Using only the CNID exposes
2699 * us to the potential wrap-around case, but as of 2/2008, it would take
2700 * over 2 months to wrap around if the machine did nothing but allocate
2701 * CNIDs. Using some kind of wrap counter would only be effective if
2702 * each file had the wrap counter associated with it. For now,
2703 * we use only the CNID to identify the file as it's good enough.
2704 */
2705
2706 *vpp = nvp;
2707
2708 hfs_unlock(VTOC(nvp));
2709 return (0);
2710 }
2711
2712
2713 /*
2714 * Vnode pointer to File handle
2715 */
2716 /* ARGSUSED */
2717 static int
2718 hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context)
2719 {
2720 struct cnode *cp;
2721 struct hfsfid *hfsfhp;
2722
2723 if (ISHFS(VTOVCB(vp)))
2724 return (ENOTSUP); /* hfs standard is not exportable */
2725
2726 if (*fhlenp < (int)sizeof(struct hfsfid))
2727 return (EOVERFLOW);
2728
2729 cp = VTOC(vp);
2730 hfsfhp = (struct hfsfid *)fhp;
2731 /* only the CNID is used to identify the file now */
2732 hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
2733 hfsfhp->hfsfid_gen = htonl(cp->c_fileid);
2734 *fhlenp = sizeof(struct hfsfid);
2735
2736 return (0);
2737 }
2738
2739
2740 /*
2741 * Initialize HFS filesystems, done only once per boot.
2742 *
2743 * HFS is not a kext-based file system. This makes it difficult to find
2744 * out when the last HFS file system was unmounted and call hfs_uninit()
2745 * to deallocate data structures allocated in hfs_init(). Therefore we
2746 * never deallocate memory allocated by lock attribute and group initializations
2747 * in this function.
2748 */
2749 static int
2750 hfs_init(__unused struct vfsconf *vfsp)
2751 {
2752 static int done = 0;
2753
2754 if (done)
2755 return (0);
2756 done = 1;
2757 hfs_chashinit();
2758
2759 BTReserveSetup();
2760
2761 hfs_lock_attr = lck_attr_alloc_init();
2762 hfs_group_attr = lck_grp_attr_alloc_init();
2763 hfs_mutex_group = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
2764 hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
2765 hfs_spinlock_group = lck_grp_alloc_init("hfs-spinlock", hfs_group_attr);
2766
2767 #if HFS_COMPRESSION
2768 decmpfs_init();
2769 #endif
2770
2771 journal_init();
2772
2773 return (0);
2774 }
2775
2776
2777 /*
2778 * Destroy all locks, mutexes and spinlocks in hfsmp on unmount or failed mount
2779 */
2780 static void
2781 hfs_locks_destroy(struct hfsmount *hfsmp)
2782 {
2783
2784 lck_mtx_destroy(&hfsmp->hfs_mutex, hfs_mutex_group);
2785 lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group);
2786 lck_rw_destroy(&hfsmp->hfs_global_lock, hfs_rwlock_group);
2787 lck_spin_destroy(&hfsmp->vcbFreeExtLock, hfs_spinlock_group);
2788 #if NEW_XATTR
2789 lck_spin_destroy(&hfsmp->hfs_xattr_io.lock, hfs_spinlock_group);
2790 #endif
2791
2792 return;
2793 }
2794
2795
2796 static int
2797 hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
2798 {
2799 struct hfsmount * hfsmp;
2800 char fstypename[MFSNAMELEN];
2801
2802 if (vp == NULL)
2803 return (EINVAL);
2804
2805 if (!vnode_isvroot(vp))
2806 return (EINVAL);
2807
2808 vnode_vfsname(vp, fstypename);
2809 if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0)
2810 return (EINVAL);
2811
2812 hfsmp = VTOHFS(vp);
2813
2814 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
2815 return (EINVAL);
2816
2817 *hfsmpp = hfsmp;
2818
2819 return (0);
2820 }
2821
2822 // Replace user-space value
2823 static errno_t ureplace(user_addr_t oldp, size_t *oldlenp,
2824 user_addr_t newp, size_t newlen,
2825 void *data, size_t len)
2826 {
2827 errno_t error;
2828 if (!oldlenp)
2829 return EFAULT;
2830 if (oldp && *oldlenp < len)
2831 return ENOMEM;
2832 if (newp && newlen != len)
2833 return EINVAL;
2834 *oldlenp = len;
2835 if (oldp) {
2836 error = copyout(data, oldp, len);
2837 if (error)
2838 return error;
2839 }
2840 return newp ? copyin(newp, data, len) : 0;
2841 }
2842
2843 #define UREPLACE(oldp, oldlenp, newp, newlenp, v) \
2844 ureplace(oldp, oldlenp, newp, newlenp, &v, sizeof(v))
2845
2846 static hfsmount_t *hfs_mount_from_cwd(vfs_context_t ctx)
2847 {
2848 vnode_t vp = vfs_context_cwd(ctx);
2849
2850 if (!vp)
2851 return NULL;
2852
2853 /*
2854 * We could use vnode_tag, but it is probably more future proof to
2855 * compare fstypename.
2856 */
2857 char fstypename[MFSNAMELEN];
2858 vnode_vfsname(vp, fstypename);
2859
2860 if (strcmp(fstypename, "hfs"))
2861 return NULL;
2862
2863 return VTOHFS(vp);
2864 }
2865
2866 /*
2867 * HFS filesystem related variables.
2868 */
2869 int
2870 hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2871 user_addr_t newp, size_t newlen, vfs_context_t context)
2872 {
2873 int error;
2874 struct hfsmount *hfsmp;
2875 struct proc *p = NULL;
2876
2877 /* all sysctl names at this level are terminal */
2878 #if TARGET_OS_OSX
2879 p = vfs_context_proc(context);
2880 if (name[0] == HFS_ENCODINGBIAS) {
2881 int bias;
2882
2883 bias = hfs_getencodingbias();
2884
2885 error = UREPLACE(oldp, oldlenp, newp, newlen, bias);
2886 if (error || !newp)
2887 return error;
2888
2889 hfs_setencodingbias(bias);
2890
2891 return 0;
2892 } else
2893 #endif //OSX
2894 if (name[0] == HFS_EXTEND_FS) {
2895 u_int64_t newsize = 0;
2896 vnode_t vp = vfs_context_cwd(context);
2897
2898 if (newp == USER_ADDR_NULL || vp == NULLVP
2899 || newlen != sizeof(quad_t) || !oldlenp)
2900 return EINVAL;
2901 if ((error = hfs_getmountpoint(vp, &hfsmp)))
2902 return (error);
2903
2904 /* Start with the 'size' set to the current number of bytes in the filesystem */
2905 newsize = ((uint64_t)hfsmp->totalBlocks) * ((uint64_t)hfsmp->blockSize);
2906
2907 error = UREPLACE(oldp, oldlenp, newp, newlen, newsize);
2908 if (error)
2909 return error;
2910
2911 return hfs_extendfs(hfsmp, newsize, context);
2912 } else if (name[0] == HFS_ENABLE_JOURNALING) {
2913 // make the file system journaled...
2914 vnode_t jvp;
2915 ExtendedVCB *vcb;
2916 struct cat_attr jnl_attr;
2917 struct cat_attr jinfo_attr;
2918 struct cat_fork jnl_fork;
2919 struct cat_fork jinfo_fork;
2920 buf_t jib_buf;
2921 uint64_t jib_blkno;
2922 uint32_t tmpblkno;
2923 uint64_t journal_byte_offset;
2924 uint64_t journal_size;
2925 vnode_t jib_vp = NULLVP;
2926 struct JournalInfoBlock local_jib;
2927 int err = 0;
2928 void *jnl = NULL;
2929 int lockflags;
2930
2931 /* Only root can enable journaling */
2932 if (!kauth_cred_issuser(kauth_cred_get())) {
2933 return (EPERM);
2934 }
2935 if (namelen != 4)
2936 return EINVAL;
2937 hfsmp = hfs_mount_from_cwd(context);
2938 if (!hfsmp)
2939 return EINVAL;
2940
2941 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2942 return EROFS;
2943 }
2944 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
2945 printf("hfs: can't make a plain hfs volume journaled.\n");
2946 return EINVAL;
2947 }
2948
2949 if (hfsmp->jnl) {
2950 printf("hfs: volume %s is already journaled!\n", hfsmp->vcbVN);
2951 return EAGAIN;
2952 }
2953 vcb = HFSTOVCB(hfsmp);
2954
2955 /* Set up local copies of the initialization info */
2956 tmpblkno = (uint32_t) name[1];
2957 jib_blkno = (uint64_t) tmpblkno;
2958 journal_byte_offset = (uint64_t) name[2];
2959 journal_byte_offset *= hfsmp->blockSize;
2960 journal_byte_offset += hfsmp->hfsPlusIOPosOffset;
2961 journal_size = (uint64_t)((unsigned)name[3]);
2962
2963 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);
2964 if (BTHasContiguousNodes(VTOF(vcb->catalogRefNum)) == 0 ||
2965 BTHasContiguousNodes(VTOF(vcb->extentsRefNum)) == 0) {
2966
2967 printf("hfs: volume has a btree w/non-contiguous nodes. can not enable journaling.\n");
2968 hfs_systemfile_unlock(hfsmp, lockflags);
2969 return EINVAL;
2970 }
2971 hfs_systemfile_unlock(hfsmp, lockflags);
2972
2973 // make sure these both exist!
2974 if ( GetFileInfo(vcb, kHFSRootFolderID, ".journal_info_block", &jinfo_attr, &jinfo_fork) == 0
2975 || GetFileInfo(vcb, kHFSRootFolderID, ".journal", &jnl_attr, &jnl_fork) == 0) {
2976
2977 return EINVAL;
2978 }
2979
2980 /*
2981 * At this point, we have a copy of the metadata that lives in the catalog for the
2982 * journal info block. Compare that the journal info block's single extent matches
2983 * that which was passed into this sysctl.
2984 *
2985 * If it is different, deny the journal enable call.
2986 */
2987 if (jinfo_fork.cf_blocks > 1) {
2988 /* too many blocks */
2989 return EINVAL;
2990 }
2991
2992 if (jinfo_fork.cf_extents[0].startBlock != jib_blkno) {
2993 /* Wrong block */
2994 return EINVAL;
2995 }
2996
2997 /*
2998 * We want to immediately purge the vnode for the JIB.
2999 *
3000 * Because it was written to from userland, there's probably
3001 * a vnode somewhere in the vnode cache (possibly with UBC backed blocks).
3002 * So we bring the vnode into core, then immediately do whatever
3003 * we can to flush/vclean it out. This is because those blocks will be
3004 * interpreted as user data, which may be treated separately on some platforms
3005 * than metadata. If the vnode is gone, then there cannot be backing blocks
3006 * in the UBC.
3007 */
3008 if (hfs_vget (hfsmp, jinfo_attr.ca_fileid, &jib_vp, 1, 0)) {
3009 return EINVAL;
3010 }
3011 /*
3012 * Now we have a vnode for the JIB. recycle it. Because we hold an iocount
3013 * on the vnode, we'll just mark it for termination when the last iocount
3014 * (hopefully ours), is dropped.
3015 */
3016 vnode_recycle (jib_vp);
3017 err = vnode_put (jib_vp);
3018 if (err) {
3019 return EINVAL;
3020 }
3021
3022 /* Initialize the local copy of the JIB (just like hfs.util) */
3023 memset (&local_jib, 'Z', sizeof(struct JournalInfoBlock));
3024 local_jib.flags = SWAP_BE32(kJIJournalInFSMask);
3025 /* Note that the JIB's offset is in bytes */
3026 local_jib.offset = SWAP_BE64(journal_byte_offset);
3027 local_jib.size = SWAP_BE64(journal_size);
3028
3029 /*
3030 * Now write out the local JIB. This essentially overwrites the userland
3031 * copy of the JIB. Read it as BLK_META to treat it as a metadata read/write.
3032 */
3033 jib_buf = buf_getblk (hfsmp->hfs_devvp,
3034 jib_blkno * (hfsmp->blockSize / hfsmp->hfs_logical_block_size),
3035 hfsmp->blockSize, 0, 0, BLK_META);
3036 char* buf_ptr = (char*) buf_dataptr (jib_buf);
3037
3038 /* Zero out the portion of the block that won't contain JIB data */
3039 memset (buf_ptr, 0, hfsmp->blockSize);
3040
3041 bcopy(&local_jib, buf_ptr, sizeof(local_jib));
3042 if (buf_bwrite (jib_buf)) {
3043 return EIO;
3044 }
3045
3046 /* Force a flush track cache */
3047 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
3048
3049 /* Now proceed with full volume sync */
3050 hfs_sync(hfsmp->hfs_mp, MNT_WAIT, context);
3051
3052 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
3053 (off_t)name[2], (off_t)name[3]);
3054
3055 //
3056 // XXXdbg - note that currently (Sept, 08) hfs_util does not support
3057 // enabling the journal on a separate device so it is safe
3058 // to just copy hfs_devvp here. If hfs_util gets the ability
3059 // to dynamically enable the journal on a separate device then
3060 // we will have to do the same thing as hfs_early_journal_init()
3061 // to locate and open the journal device.
3062 //
3063 jvp = hfsmp->hfs_devvp;
3064 jnl = journal_create(jvp, journal_byte_offset, journal_size,
3065 hfsmp->hfs_devvp,
3066 hfsmp->hfs_logical_block_size,
3067 0,
3068 0,
3069 hfs_sync_metadata, hfsmp->hfs_mp,
3070 hfsmp->hfs_mp);
3071
3072 /*
3073 * Set up the trim callback function so that we can add
3074 * recently freed extents to the free extent cache once
3075 * the transaction that freed them is written to the
3076 * journal on disk.
3077 */
3078 if (jnl)
3079 journal_trim_set_callback(jnl, hfs_trim_callback, hfsmp);
3080
3081 if (jnl == NULL) {
3082 printf("hfs: FAILED to create the journal!\n");
3083 return EIO;
3084 }
3085
3086 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3087
3088 /*
3089 * Flush all dirty metadata buffers.
3090 */
3091 buf_flushdirtyblks(hfsmp->hfs_devvp, TRUE, 0, "hfs_sysctl");
3092 buf_flushdirtyblks(hfsmp->hfs_extents_vp, TRUE, 0, "hfs_sysctl");
3093 buf_flushdirtyblks(hfsmp->hfs_catalog_vp, TRUE, 0, "hfs_sysctl");
3094 buf_flushdirtyblks(hfsmp->hfs_allocation_vp, TRUE, 0, "hfs_sysctl");
3095 if (hfsmp->hfs_attribute_vp)
3096 buf_flushdirtyblks(hfsmp->hfs_attribute_vp, TRUE, 0, "hfs_sysctl");
3097
3098 HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1];
3099 HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask;
3100 hfsmp->jvp = jvp;
3101 hfsmp->jnl = jnl;
3102
3103 // save this off for the hack-y check in hfs_remove()
3104 hfsmp->jnl_start = (u_int32_t)name[2];
3105 hfsmp->jnl_size = (off_t)((unsigned)name[3]);
3106 hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid;
3107 hfsmp->hfs_jnlfileid = jnl_attr.ca_fileid;
3108
3109 vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3110
3111 hfs_unlock_global (hfsmp);
3112 hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT | HFS_FVH_WRITE_ALT);
3113
3114 {
3115 fsid_t fsid;
3116
3117 fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3118 fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3119 vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3120 }
3121 return 0;
3122 } else if (name[0] == HFS_DISABLE_JOURNALING) {
3123 // clear the journaling bit
3124
3125 /* Only root can disable journaling */
3126 if (!kauth_cred_issuser(kauth_cred_get())) {
3127 return (EPERM);
3128 }
3129
3130 hfsmp = hfs_mount_from_cwd(context);
3131 if (!hfsmp)
3132 return EINVAL;
3133
3134 /*
3135 * Disabling journaling is disallowed on volumes with directory hard links
3136 * because we have not tested the relevant code path.
3137 */
3138 if (hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries != 0){
3139 printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
3140 return EPERM;
3141 }
3142
3143 printf("hfs: disabling journaling for %s\n", hfsmp->vcbVN);
3144
3145 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3146
3147 // Lights out for you buddy!
3148 journal_close(hfsmp->jnl);
3149 hfsmp->jnl = NULL;
3150
3151 hfs_close_jvp(hfsmp);
3152 vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3153 hfsmp->jnl_start = 0;
3154 hfsmp->hfs_jnlinfoblkid = 0;
3155 hfsmp->hfs_jnlfileid = 0;
3156
3157 HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask;
3158
3159 hfs_unlock_global (hfsmp);
3160
3161 hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT | HFS_FVH_WRITE_ALT);
3162
3163 {
3164 fsid_t fsid;
3165
3166 fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3167 fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3168 vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3169 }
3170 return 0;
3171 } else if (name[0] == VFS_CTL_QUERY) {
3172 #if TARGET_OS_IPHONE
3173 return EPERM;
3174 #else //!TARGET_OS_IPHONE
3175 struct sysctl_req *req;
3176 union union_vfsidctl vc;
3177 struct mount *mp;
3178 struct vfsquery vq;
3179
3180 req = CAST_DOWN(struct sysctl_req *, oldp); /* we're new style vfs sysctl. */
3181 if (req == NULL) {
3182 return EFAULT;
3183 }
3184
3185 error = SYSCTL_IN(req, &vc, proc_is64bit(p)? sizeof(vc.vc64):sizeof(vc.vc32));
3186 if (error) return (error);
3187
3188 mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
3189 if (mp == NULL) return (ENOENT);
3190
3191 hfsmp = VFSTOHFS(mp);
3192 bzero(&vq, sizeof(vq));
3193 vq.vq_flags = hfsmp->hfs_notification_conditions;
3194 return SYSCTL_OUT(req, &vq, sizeof(vq));;
3195 #endif // TARGET_OS_IPHONE
3196 } else if (name[0] == HFS_REPLAY_JOURNAL) {
3197 vnode_t devvp = NULL;
3198 int device_fd;
3199 if (namelen != 2) {
3200 return (EINVAL);
3201 }
3202 device_fd = name[1];
3203 error = file_vnode(device_fd, &devvp);
3204 if (error) {
3205 return error;
3206 }
3207 error = vnode_getwithref(devvp);
3208 if (error) {
3209 file_drop(device_fd);
3210 return error;
3211 }
3212 error = hfs_journal_replay(devvp, context);
3213 file_drop(device_fd);
3214 vnode_put(devvp);
3215 return error;
3216 }
3217 #if DEBUG || TARGET_OS_OSX
3218 else if (name[0] == HFS_ENABLE_RESIZE_DEBUG) {
3219 if (!kauth_cred_issuser(kauth_cred_get())) {
3220 return (EPERM);
3221 }
3222
3223 int old = hfs_resize_debug;
3224
3225 int res = UREPLACE(oldp, oldlenp, newp, newlen, hfs_resize_debug);
3226
3227 if (old != hfs_resize_debug) {
3228 printf("hfs: %s resize debug\n",
3229 hfs_resize_debug ? "enabled" : "disabled");
3230 }
3231
3232 return res;
3233 }
3234 #endif // DEBUG || OSX
3235
3236 return (ENOTSUP);
3237 }
3238
3239 /*
3240 * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support
3241 * the vn_getpath_ext. We use it to leverage the code below that updates
3242 * the origin list cache if necessary
3243 */
3244
int
hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
{
	int error;
	int lockflags;
	struct hfsmount *hfsmp;

	hfsmp = VFSTOHFS(mp);

	/* Fetch the vnode by catalog node ID; skiplock=1 so the cnode
	 * comes back unlocked (we take the lock ourselves below). */
	error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1, 0);
	if (error)
		return error;

	/*
	 * If the look-up was via the object ID (rather than the link ID),
	 * then we make sure there's a parent here.  We can't leave this
	 * until hfs_vnop_getattr because if there's a problem getting the
	 * parent at that point, all the caller will do is call
	 * hfs_vfs_vget again and we'll end up in an infinite loop.
	 */

	cnode_t *cp = VTOC(*vpp);

	/* Only hardlinks looked up by their inode's file ID (not by a
	 * specific link's cnid) need origin fix-up. */
	if (ISSET(cp->c_flag, C_HARDLINK) && ino == cp->c_fileid) {
		hfs_lock_always(cp, HFS_SHARED_LOCK);

		if (!hfs_haslinkorigin(cp)) {
			/* Need to write the origin: try to upgrade the shared
			 * lock in place; if the upgrade fails, the lock was
			 * dropped, so re-acquire it exclusively. */
			if (!hfs_lock_upgrade(cp))
				hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);

			if (cp->c_cnid == cp->c_fileid) {
				/*
				 * Descriptor is stale, so we need to refresh it.  We
				 * pick the first link.
				 */
				cnid_t link_id;

				error = hfs_first_link(hfsmp, cp, &link_id);

				if (!error) {
					/* Refresh c_desc from the catalog entry for that link. */
					lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
					error = cat_findname(hfsmp, link_id, &cp->c_desc);
					hfs_systemfile_unlock(hfsmp, lockflags);
				}
			} else {
				// We'll use whatever link the descriptor happens to have
				error = 0;
			}
			if (!error)
				hfs_savelinkorigin(cp, cp->c_parentcnid);
		}

		hfs_unlock(cp);

		/* On failure, drop the iocount we got from hfs_vget so the
		 * caller never sees a vnode without a usable parent. */
		if (error) {
			vnode_put(*vpp);
			*vpp = NULL;
		}
	}

	return error;
}
3307
3308
3309 /*
3310 * Look up an HFS object by ID.
3311 *
3312 * The object is returned with an iocount reference and the cnode locked.
3313 *
3314 * If the object is a file then it will represent the data fork.
3315 */
int
hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted)
{
	struct vnode *vp = NULLVP;
	struct cat_desc cndesc;
	struct cat_attr cnattr;
	struct cat_fork cnfork;
	u_int32_t linkref = 0;		/* nonzero => cnid named a raw hardlink inode */
	int error;

	/* Check for cnids that shouldn't be exported (reserved IDs below
	 * the first user catalog node ID, other than root and its parent). */
	if ((cnid < kHFSFirstUserCatalogNodeID) &&
	    (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) {
		return (ENOENT);
	}
	/* Don't export our private directories. */
	if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
	    cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
		return (ENOENT);
	}
	/*
	 * Check the hash first
	 */
	vp = hfs_chash_getvnode(hfsmp, cnid, 0, skiplock, allow_deleted);
	if (vp) {
		*vpp = vp;
		return(0);
	}

	bzero(&cndesc, sizeof(cndesc));
	bzero(&cnattr, sizeof(cnattr));
	bzero(&cnfork, sizeof(cnfork));

	/*
	 * Not in hash, lookup in catalog
	 */
	if (cnid == kHFSRootParentID) {
		/* The root's parent has no catalog record; synthesize one. */
		static char hfs_rootname[] = "/";

		cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0];
		cndesc.cd_namelen = 1;
		cndesc.cd_parentcnid = kHFSRootParentID;
		cndesc.cd_cnid = kHFSRootFolderID;
		cndesc.cd_flags = CD_ISDIR;

		cnattr.ca_fileid = kHFSRootFolderID;
		cnattr.ca_linkcount = 1;
		cnattr.ca_entries = 1;
		cnattr.ca_dircount = 1;
		cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
	} else {
		int lockflags;
		cnid_t pid;
		const char *nameptr;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
		error = cat_idlookup(hfsmp, cnid, 0, 0, &cndesc, &cnattr, &cnfork);
		hfs_systemfile_unlock(hfsmp, lockflags);

		if (error) {
			*vpp = NULL;
			return (error);
		}

		/*
		 * Check for a raw hardlink inode and save its linkref.
		 * Raw inodes live in the private metadata directories with
		 * well-known name prefixes; the numeric suffix is the linkref.
		 */
		pid = cndesc.cd_parentcnid;
		nameptr = (const char *)cndesc.cd_nameptr;

		if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
		    cndesc.cd_namelen > HFS_INODE_PREFIX_LEN &&
		    (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) {
			linkref = strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10);

		} else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
		           cndesc.cd_namelen > HFS_DIRINODE_PREFIX_LEN &&
		           (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) {
			linkref = strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10);

		} else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
		           cndesc.cd_namelen > HFS_DELETE_PREFIX_LEN &&
		           (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) {
			*vpp = NULL;
			cat_releasedesc(&cndesc);
			return (ENOENT);	/* open unlinked file */
		}
	}

	/*
	 * Finish initializing cnode descriptor for hardlinks.
	 *
	 * We need a valid name and parent for reverse lookups.
	 */
	if (linkref) {
		cnid_t lastid;
		struct cat_desc linkdesc;
		int linkerr = 0;

		cnattr.ca_linkref = linkref;
		bzero (&linkdesc, sizeof (linkdesc));

		/*
		 * If the caller supplied the raw inode value, then we don't know exactly
		 * which hardlink they wanted.  It's likely that they acquired the raw inode
		 * value BEFORE the item became a hardlink, in which case, they probably
		 * want the oldest link.  So request the oldest link from the catalog.
		 *
		 * Unfortunately, this requires that we iterate through all N hardlinks.  On the plus
		 * side, since we know that we want the last linkID, we can also have this one
		 * call give us back the name of the last ID, since it's going to have it in-hand...
		 */
		linkerr = hfs_lookup_lastlink (hfsmp, linkref, &lastid, &linkdesc);
		if ((linkerr == 0) && (lastid != 0)) {
			/*
			 * Release any lingering buffers attached to our local descriptor.
			 * Then copy the name and other business into the cndesc
			 */
			cat_releasedesc (&cndesc);
			bcopy (&linkdesc, &cndesc, sizeof(linkdesc));
		}
		/* If it failed, the linkref code will just use whatever it had in-hand below. */
	}

	if (linkref) {
		/* Raw hardlink inode: no component name is supplied, and the
		 * resulting vnode is marked as a hardlink / multipath. */
		int newvnode_flags = 0;

		error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr,
		                        &cnfork, &vp, &newvnode_flags);
		if (error == 0) {
			VTOC(vp)->c_flag |= C_HARDLINK;
			vnode_setmultipath(vp);
		}
	} else {
		int newvnode_flags = 0;

		void *buf = hfs_malloc(MAXPATHLEN);

		/* Supply hfs_getnewvnode with a component name. */
		struct componentname cn = {
			.cn_nameiop = LOOKUP,
			.cn_flags = ISLASTCN,
			.cn_pnlen = MAXPATHLEN,
			.cn_namelen = cndesc.cd_namelen,
			.cn_pnbuf = buf,
			.cn_nameptr = buf
		};

		/* +1 copies the name's NUL terminator too. */
		bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1);

		error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr,
		                        &cnfork, &vp, &newvnode_flags);

		/* If this turned out to be a hardlink, remember which parent
		 * (origin) it was reached through for reverse lookups. */
		if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK)) {
			hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid);
		}

		hfs_free(buf, MAXPATHLEN);
	}
	cat_releasedesc(&cndesc);

	*vpp = vp;
	/* Caller asked for an unlocked cnode; hfs_getnewvnode returns it locked. */
	if (vp && skiplock) {
		hfs_unlock(VTOC(vp));
	}
	return (error);
}
3483
3484
3485 /*
3486 * Flush out all the files in a filesystem.
3487 */
static int
#if QUOTA
hfs_flushfiles(struct mount *mp, int flags, struct proc *p)
#else
hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p)
#endif /* QUOTA */
{
	struct hfsmount *hfsmp;
	struct vnode *skipvp = NULLVP;	/* root vnode to exempt from vflush, if pinned */
	int error;
	int accounted_root_usecounts;	/* root refs held by open quota files */
#if QUOTA
	int i;
#endif

	hfsmp = VFSTOHFS(mp);

	accounted_root_usecounts = 0;
#if QUOTA
	/*
	 * The open quota files have an indirect reference on
	 * the root directory vnode.  We must account for this
	 * extra reference when doing the intial vflush.
	 */
	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
		/* Find out how many quota files we have open. */
		for (i = 0; i < MAXQUOTAS; i++) {
			if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP)
				++accounted_root_usecounts;
		}
	}
#endif /* QUOTA */

	if (accounted_root_usecounts > 0) {
		/* Obtain the root vnode so we can skip over it. */
		skipvp = hfs_chash_getvnode(hfsmp, kHFSRootFolderID, 0, 0, 0);
	}

	/* First pass also skips swap files; second pass flushes them. */
	error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags);
	if (error != 0)
		return(error);

	error = vflush(mp, skipvp, SKIPSYSTEM | flags);

	if (skipvp) {
		/*
		 * See if there are additional references on the
		 * root vp besides the ones obtained from the open
		 * quota files and CoreStorage.
		 */
		if ((error == 0) &&
		    (vnode_isinuse(skipvp, accounted_root_usecounts))) {
			error = EBUSY;	/* root directory is still open */
		}
		hfs_unlock(VTOC(skipvp));
		/* release the iocount from the hfs_chash_getvnode call above. */
		vnode_put(skipvp);
	}
	/* Unless forcing, a busy root (or other flush error) aborts here. */
	if (error && (flags & FORCECLOSE) == 0)
		return (error);

#if QUOTA
	/* Close the quota files, dropping their root references. */
	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP)
				continue;
			hfs_quotaoff(p, mp, i);
		}
	}
#endif /* QUOTA */

	/* Now that the root is no longer pinned, flush it too. */
	if (skipvp) {
		error = vflush(mp, NULLVP, SKIPSYSTEM | flags);
	}

	return (error);
}
3565
3566 /*
3567 * Update volume encoding bitmap (HFS Plus only)
3568 *
3569 * Mark a legacy text encoding as in-use (as needed)
3570 * in the volume header of this HFS+ filesystem.
3571 */
3572 void
3573 hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
3574 {
3575 #define kIndexMacUkrainian 48 /* MacUkrainian encoding is 152 */
3576 #define kIndexMacFarsi 49 /* MacFarsi encoding is 140 */
3577
3578 u_int32_t index;
3579
3580 switch (encoding) {
3581 case kTextEncodingMacUkrainian:
3582 index = kIndexMacUkrainian;
3583 break;
3584 case kTextEncodingMacFarsi:
3585 index = kIndexMacFarsi;
3586 break;
3587 default:
3588 index = encoding;
3589 break;
3590 }
3591
3592 /* Only mark the encoding as in-use if it wasn't already set */
3593 if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
3594 hfs_lock_mount (hfsmp);
3595 hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
3596 MarkVCBDirty(hfsmp);
3597 hfs_unlock_mount(hfsmp);
3598 }
3599 }
3600
3601 /*
3602 * Update volume stats
3603 *
3604 * On journal volumes this will cause a volume header flush
3605 */
3606 int
3607 hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
3608 {
3609 struct timeval tv;
3610
3611 microtime(&tv);
3612
3613 hfs_lock_mount (hfsmp);
3614
3615 MarkVCBDirty(hfsmp);
3616 hfsmp->hfs_mtime = tv.tv_sec;
3617
3618 switch (op) {
3619 case VOL_UPDATE:
3620 break;
3621 case VOL_MKDIR:
3622 if (hfsmp->hfs_dircount != 0xFFFFFFFF)
3623 ++hfsmp->hfs_dircount;
3624 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3625 ++hfsmp->vcbNmRtDirs;
3626 break;
3627 case VOL_RMDIR:
3628 if (hfsmp->hfs_dircount != 0)
3629 --hfsmp->hfs_dircount;
3630 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3631 --hfsmp->vcbNmRtDirs;
3632 break;
3633 case VOL_MKFILE:
3634 if (hfsmp->hfs_filecount != 0xFFFFFFFF)
3635 ++hfsmp->hfs_filecount;
3636 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3637 ++hfsmp->vcbNmFls;
3638 break;
3639 case VOL_RMFILE:
3640 if (hfsmp->hfs_filecount != 0)
3641 --hfsmp->hfs_filecount;
3642 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3643 --hfsmp->vcbNmFls;
3644 break;
3645 }
3646
3647 hfs_unlock_mount (hfsmp);
3648
3649 if (hfsmp->jnl) {
3650 hfs_flushvolumeheader(hfsmp, 0);
3651 }
3652
3653 return (0);
3654 }
3655
3656
3657 #if CONFIG_HFS_STD
3658 /* HFS Standard MDB flush */
static int
hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush)
{
	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
	struct filefork *fp;
	HFSMasterDirectoryBlock *mdb;
	struct buf *bp = NULL;
	int retval;
	int sector_size;
	ByteCount namelen;

	/* Read the sector containing the primary MDB. */
	sector_size = hfsmp->hfs_logical_block_size;
	retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sector_size), sector_size, NOCRED, &bp);
	if (retval) {
		if (bp)
			buf_brelse(bp);
		return retval;
	}

	/* Hold the mount lock while copying in-memory VCB state so the
	 * on-disk MDB gets a consistent snapshot. */
	hfs_lock_mount (hfsmp);

	mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sector_size));

	/* All MDB fields are big-endian on disk; HFS Standard dates are
	 * stored in local time, so convert from UTC. */
	mdb->drCrDate	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->hfs_itime)));
	mdb->drLsMod	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod)));
	mdb->drAtrb	= SWAP_BE16 (vcb->vcbAtrb);
	mdb->drNmFls	= SWAP_BE16 (vcb->vcbNmFls);
	mdb->drAllocPtr	= SWAP_BE16 (vcb->nextAllocation);
	mdb->drClpSiz	= SWAP_BE32 (vcb->vcbClpSiz);
	mdb->drNxtCNID	= SWAP_BE32 (vcb->vcbNxtCNID);
	mdb->drFreeBks	= SWAP_BE16 (vcb->freeBlocks);

	/* Convert the UTF-8 volume name back to its HFS encoding. */
	namelen = strlen((char *)vcb->vcbVN);
	retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN);
	/* Retry with MacRoman in case that's how it was exported. */
	if (retval)
		retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN);

	mdb->drVolBkUp	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbVolBkUp)));
	mdb->drWrCnt	= SWAP_BE32 (vcb->vcbWrCnt);
	mdb->drNmRtDirs	= SWAP_BE16 (vcb->vcbNmRtDirs);
	mdb->drFilCnt	= SWAP_BE32 (vcb->vcbFilCnt);
	mdb->drDirCnt	= SWAP_BE32 (vcb->vcbDirCnt);

	bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo));

	/* Sync the extents overflow file's first three extents and sizes,
	 * then clear its cnode's modified flag. */
	fp = VTOF(vcb->extentsRefNum);
	mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
	mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
	mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
	mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
	mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
	mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
	mdb->drXTFlSize	= SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
	mdb->drXTClpSiz	= SWAP_BE32 (fp->ff_clumpsize);
	FTOC(fp)->c_flag &= ~C_MODIFIED;

	/* Same for the catalog file. */
	fp = VTOF(vcb->catalogRefNum);
	mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
	mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
	mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
	mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
	mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
	mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
	mdb->drCTFlSize	= SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
	mdb->drCTClpSiz	= SWAP_BE32 (fp->ff_clumpsize);
	FTOC(fp)->c_flag &= ~C_MODIFIED;

	MarkVCBClean( vcb );

	hfs_unlock_mount (hfsmp);

	/* If requested, flush out the alternate MDB */
	if (altflush) {
		struct buf *alt_bp = NULL;

		if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_partition_avh_sector, sector_size, NOCRED, &alt_bp) == 0) {
			bcopy(mdb, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sector_size), kMDBSize);

			(void) VNOP_BWRITE(alt_bp);
		} else if (alt_bp)
			buf_brelse(alt_bp);
	}

	/* Async write unless the caller wants to wait for completion. */
	if (waitfor != MNT_WAIT)
		buf_bawrite(bp);
	else
		retval = VNOP_BWRITE(bp);

	return (retval);
}
3750 #endif
3751
3752 /*
3753 * Flush any dirty in-memory mount data to the on-disk
3754 * volume header.
3755 *
3756 * Note: the on-disk volume signature is intentionally
3757 * not flushed since the on-disk "H+" and "HX" signatures
3758 * are always stored in-memory as "H+".
3759 */
3760 int
3761 hfs_flushvolumeheader(struct hfsmount *hfsmp,
3762 hfs_flush_volume_header_options_t options)
3763 {
3764 ExtendedVCB *vcb = HFSTOVCB(hfsmp);
3765 struct filefork *fp;
3766 HFSPlusVolumeHeader *volumeHeader, *altVH;
3767 int retval;
3768 struct buf *bp, *alt_bp;
3769 int i;
3770 daddr64_t priIDSector;
3771 bool critical = false;
3772 u_int16_t signature;
3773 u_int16_t hfsversion;
3774 daddr64_t avh_sector;
3775 bool altflush = ISSET(options, HFS_FVH_WRITE_ALT);
3776
3777 if (ISSET(options, HFS_FVH_FLUSH_IF_DIRTY)
3778 && !hfs_header_needs_flushing(hfsmp)) {
3779 return 0;
3780 }
3781
3782 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
3783 return(0);
3784 }
3785 #if CONFIG_HFS_STD
3786 if (hfsmp->hfs_flags & HFS_STANDARD) {
3787 return hfs_flushMDB(hfsmp, ISSET(options, HFS_FVH_WAIT) ? MNT_WAIT : 0, altflush);
3788 }
3789 #endif
3790 priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3791 HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
3792
3793 if (hfs_start_transaction(hfsmp) != 0) {
3794 return EINVAL;
3795 }
3796
3797 bp = NULL;
3798 alt_bp = NULL;
3799
3800 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3801 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
3802 hfsmp->hfs_physical_block_size, NOCRED, &bp);
3803 if (retval) {
3804 printf("hfs: err %d reading VH blk (vol=%s)\n", retval, vcb->vcbVN);
3805 goto err_exit;
3806 }
3807
3808 volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) +
3809 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3810
3811 /*
3812 * Sanity check what we just read. If it's bad, try the alternate
3813 * instead.
3814 */
3815 signature = SWAP_BE16 (volumeHeader->signature);
3816 hfsversion = SWAP_BE16 (volumeHeader->version);
3817 if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3818 (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
3819 (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) {
3820 printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3821 vcb->vcbVN, signature, hfsversion,
3822 SWAP_BE32 (volumeHeader->blockSize));
3823 hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED);
3824
3825 /* Almost always we read AVH relative to the partition size */
3826 avh_sector = hfsmp->hfs_partition_avh_sector;
3827
3828 if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
3829 /*
3830 * The two altVH offsets do not match --- which means that a smaller file
3831 * system exists in a larger partition. Verify that we have the correct
3832 * alternate volume header sector as per the current parititon size.
3833 * The GPT device that we are mounted on top could have changed sizes
3834 * without us knowing.
3835 *
3836 * We're in a transaction, so it's safe to modify the partition_avh_sector
3837 * field if necessary.
3838 */
3839
3840 uint64_t sector_count;
3841
3842 /* Get underlying device block count */
3843 if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
3844 (caddr_t)&sector_count, 0, vfs_context_current()))) {
3845 printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
3846 retval = ENXIO;
3847 goto err_exit;
3848 }
3849
3850 /* Partition size was changed without our knowledge */
3851 if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
3852 hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3853 HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
3854 /* Note: hfs_fs_avh_sector will remain unchanged */
3855 printf ("hfs_flushVH: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
3856 hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
3857
3858 /*
3859 * We just updated the offset for AVH relative to
3860 * the partition size, so the content of that AVH
3861 * will be invalid. But since we are also maintaining
3862 * a valid AVH relative to the file system size, we
3863 * can read it since primary VH and partition AVH
3864 * are not valid.
3865 */
3866 avh_sector = hfsmp->hfs_fs_avh_sector;
3867 }
3868 }
3869
3870 printf ("hfs: trying alternate (for %s) avh_sector=%qu\n",
3871 (avh_sector == hfsmp->hfs_fs_avh_sector) ? "file system" : "partition", avh_sector);
3872
3873 if (avh_sector) {
3874 retval = buf_meta_bread(hfsmp->hfs_devvp,
3875 HFS_PHYSBLK_ROUNDDOWN(avh_sector, hfsmp->hfs_log_per_phys),
3876 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp);
3877 if (retval) {
3878 printf("hfs: err %d reading alternate VH (%s)\n", retval, vcb->vcbVN);
3879 goto err_exit;
3880 }
3881
3882 altVH = (HFSPlusVolumeHeader *)((char *)buf_dataptr(alt_bp) +
3883 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size));
3884 signature = SWAP_BE16(altVH->signature);
3885 hfsversion = SWAP_BE16(altVH->version);
3886
3887 if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3888 (hfsversion < kHFSPlusVersion) || (kHFSPlusVersion > 100) ||
3889 (SWAP_BE32(altVH->blockSize) != vcb->blockSize)) {
3890 printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3891 vcb->vcbVN, signature, hfsversion,
3892 SWAP_BE32(altVH->blockSize));
3893 retval = EIO;
3894 goto err_exit;
3895 }
3896
3897 /* The alternate is plausible, so use it. */
3898 bcopy(altVH, volumeHeader, kMDBSize);
3899 buf_brelse(alt_bp);
3900 alt_bp = NULL;
3901 } else {
3902 /* No alternate VH, nothing more we can do. */
3903 retval = EIO;
3904 goto err_exit;
3905 }
3906 }
3907
3908 if (hfsmp->jnl) {
3909 journal_modify_block_start(hfsmp->jnl, bp);
3910 }
3911
3912 /*
3913 * For embedded HFS+ volumes, update create date if it changed
3914 * (ie from a setattrlist call)
3915 */
3916 if ((vcb->hfsPlusIOPosOffset != 0) &&
3917 (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) {
3918 struct buf *bp2;
3919 HFSMasterDirectoryBlock *mdb;
3920
3921 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3922 HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys),
3923 hfsmp->hfs_physical_block_size, NOCRED, &bp2);
3924 if (retval) {
3925 if (bp2)
3926 buf_brelse(bp2);
3927 retval = 0;
3928 } else {
3929 mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) +
3930 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3931
3932 if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate )
3933 {
3934 if (hfsmp->jnl) {
3935 journal_modify_block_start(hfsmp->jnl, bp2);
3936 }
3937
3938 mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate); /* pick up the new create date */
3939
3940 if (hfsmp->jnl) {
3941 journal_modify_block_end(hfsmp->jnl, bp2, NULL, NULL);
3942 } else {
3943 (void) VNOP_BWRITE(bp2); /* write out the changes */
3944 }
3945 }
3946 else
3947 {
3948 buf_brelse(bp2); /* just release it */
3949 }
3950 }
3951 }
3952
3953 hfs_lock_mount (hfsmp);
3954
3955 /* Note: only update the lower 16 bits worth of attributes */
3956 volumeHeader->attributes = SWAP_BE32 (vcb->vcbAtrb);
3957 volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock);
3958 if (hfsmp->jnl) {
3959 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion);
3960 } else {
3961 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion);
3962 }
3963 volumeHeader->createDate = SWAP_BE32 (vcb->localCreateDate); /* volume create date is in local time */
3964 volumeHeader->modifyDate = SWAP_BE32 (to_hfs_time(vcb->vcbLsMod));
3965 volumeHeader->backupDate = SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp));
3966 volumeHeader->fileCount = SWAP_BE32 (vcb->vcbFilCnt);
3967 volumeHeader->folderCount = SWAP_BE32 (vcb->vcbDirCnt);
3968 volumeHeader->totalBlocks = SWAP_BE32 (vcb->totalBlocks);
3969 volumeHeader->freeBlocks = SWAP_BE32 (vcb->freeBlocks + vcb->reclaimBlocks);
3970 volumeHeader->nextAllocation = SWAP_BE32 (vcb->nextAllocation);
3971 volumeHeader->rsrcClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
3972 volumeHeader->dataClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
3973 volumeHeader->nextCatalogID = SWAP_BE32 (vcb->vcbNxtCNID);
3974 volumeHeader->writeCount = SWAP_BE32 (vcb->vcbWrCnt);
3975 volumeHeader->encodingsBitmap = SWAP_BE64 (vcb->encodingsBitmap);
3976
3977 if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) {
3978 bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo));
3979 critical = true;
3980 }
3981
3982 if (!altflush && !ISSET(options, HFS_FVH_FLUSH_IF_DIRTY)) {
3983 goto done;
3984 }
3985
3986 /* Sync Extents over-flow file meta data */
3987 fp = VTOF(vcb->extentsRefNum);
3988 if (FTOC(fp)->c_flag & C_MODIFIED) {
3989 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3990 volumeHeader->extentsFile.extents[i].startBlock =
3991 SWAP_BE32 (fp->ff_extents[i].startBlock);
3992 volumeHeader->extentsFile.extents[i].blockCount =
3993 SWAP_BE32 (fp->ff_extents[i].blockCount);
3994 }
3995 volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size);
3996 volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3997 volumeHeader->extentsFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3998 FTOC(fp)->c_flag &= ~C_MODIFIED;
3999 altflush = true;
4000 }
4001
4002 /* Sync Catalog file meta data */
4003 fp = VTOF(vcb->catalogRefNum);
4004 if (FTOC(fp)->c_flag & C_MODIFIED) {
4005 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4006 volumeHeader->catalogFile.extents[i].startBlock =
4007 SWAP_BE32 (fp->ff_extents[i].startBlock);
4008 volumeHeader->catalogFile.extents[i].blockCount =
4009 SWAP_BE32 (fp->ff_extents[i].blockCount);
4010 }
4011 volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size);
4012 volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
4013 volumeHeader->catalogFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
4014 FTOC(fp)->c_flag &= ~C_MODIFIED;
4015 altflush = true;
4016 }
4017
4018 /* Sync Allocation file meta data */
4019 fp = VTOF(vcb->allocationsRefNum);
4020 if (FTOC(fp)->c_flag & C_MODIFIED) {
4021 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4022 volumeHeader->allocationFile.extents[i].startBlock =
4023 SWAP_BE32 (fp->ff_extents[i].startBlock);
4024 volumeHeader->allocationFile.extents[i].blockCount =
4025 SWAP_BE32 (fp->ff_extents[i].blockCount);
4026 }
4027 volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size);
4028 volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
4029 volumeHeader->allocationFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
4030 FTOC(fp)->c_flag &= ~C_MODIFIED;
4031 altflush = true;
4032 }
4033
4034 /* Sync Attribute file meta data */
4035 if (hfsmp->hfs_attribute_vp) {
4036 fp = VTOF(hfsmp->hfs_attribute_vp);
4037 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4038 volumeHeader->attributesFile.extents[i].startBlock =
4039 SWAP_BE32 (fp->ff_extents[i].startBlock);
4040 volumeHeader->attributesFile.extents[i].blockCount =
4041 SWAP_BE32 (fp->ff_extents[i].blockCount);
4042 }
4043 if (ISSET(FTOC(fp)->c_flag, C_MODIFIED)) {
4044 FTOC(fp)->c_flag &= ~C_MODIFIED;
4045 altflush = true;
4046 }
4047 volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size);
4048 volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
4049 volumeHeader->attributesFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
4050 }
4051
4052 /* Sync Startup file meta data */
4053 if (hfsmp->hfs_startup_vp) {
4054 fp = VTOF(hfsmp->hfs_startup_vp);
4055 if (FTOC(fp)->c_flag & C_MODIFIED) {
4056 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4057 volumeHeader->startupFile.extents[i].startBlock =
4058 SWAP_BE32 (fp->ff_extents[i].startBlock);
4059 volumeHeader->startupFile.extents[i].blockCount =
4060 SWAP_BE32 (fp->ff_extents[i].blockCount);
4061 }
4062 volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size);
4063 volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
4064 volumeHeader->startupFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
4065 FTOC(fp)->c_flag &= ~C_MODIFIED;
4066 altflush = true;
4067 }
4068 }
4069
4070 if (altflush)
4071 critical = true;
4072
4073 done:
4074 MarkVCBClean(hfsmp);
4075 hfs_unlock_mount (hfsmp);
4076
4077 /* If requested, flush out the alternate volume header */
4078 if (altflush) {
4079 /*
4080 * The two altVH offsets do not match --- which means that a smaller file
4081 * system exists in a larger partition. Verify that we have the correct
4082 * alternate volume header sector as per the current parititon size.
4083 * The GPT device that we are mounted on top could have changed sizes
4084 * without us knowning.
4085 *
4086 * We're in a transaction, so it's safe to modify the partition_avh_sector
4087 * field if necessary.
4088 */
4089 if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
4090 uint64_t sector_count;
4091
4092 /* Get underlying device block count */
4093 if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
4094 (caddr_t)&sector_count, 0, vfs_context_current()))) {
4095 printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
4096 retval = ENXIO;
4097 goto err_exit;
4098 }
4099
4100 /* Partition size was changed without our knowledge */
4101 if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
4102 hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
4103 HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
4104 /* Note: hfs_fs_avh_sector will remain unchanged */
4105 printf ("hfs_flushVH: altflush: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
4106 hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
4107 }
4108 }
4109
4110 /*
4111 * First see if we need to write I/O to the "secondary" AVH
4112 * located at FS Size - 1024 bytes, because this one will
4113 * always go into the journal. We put this AVH into the journal
4114 * because even if the filesystem size has shrunk, this LBA should be
4115 * reachable after the partition-size modification has occurred.
4116 * The one where we need to be careful is partitionsize-1024, since the
4117 * partition size should hopefully shrink.
4118 *
4119 * Most of the time this block will not execute.
4120 */
4121 if ((hfsmp->hfs_fs_avh_sector) &&
4122 (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
4123 if (buf_meta_bread(hfsmp->hfs_devvp,
4124 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
4125 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4126 if (hfsmp->jnl) {
4127 journal_modify_block_start(hfsmp->jnl, alt_bp);
4128 }
4129
4130 bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4131 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4132 kMDBSize);
4133
4134 if (hfsmp->jnl) {
4135 journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
4136 } else {
4137 (void) VNOP_BWRITE(alt_bp);
4138 }
4139 } else if (alt_bp) {
4140 buf_brelse(alt_bp);
4141 }
4142 }
4143
4144 /*
4145 * Flush out alternate volume header located at 1024 bytes before
4146 * end of the partition as part of journal transaction. In
4147 * most cases, this will be the only alternate volume header
4148 * that we need to worry about because the file system size is
4149 * same as the partition size, therefore hfs_fs_avh_sector is
4150 * same as hfs_partition_avh_sector. This is the "priority" AVH.
4151 *
4152 * However, do not always put this I/O into the journal. If we skipped the
4153 * FS-Size AVH write above, then we will put this I/O into the journal as
4154 * that indicates the two were in sync. However, if the FS size is
4155 * not the same as the partition size, we are tracking two. We don't
4156 * put it in the journal in that case, since if the partition
4157 * size changes between uptimes, and we need to replay the journal,
4158 * this I/O could generate an EIO if during replay it is now trying
4159 * to access blocks beyond the device EOF.
4160 */
4161 if (hfsmp->hfs_partition_avh_sector) {
4162 if (buf_meta_bread(hfsmp->hfs_devvp,
4163 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
4164 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4165
4166 /* only one AVH, put this I/O in the journal. */
4167 if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4168 journal_modify_block_start(hfsmp->jnl, alt_bp);
4169 }
4170
4171 bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4172 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4173 kMDBSize);
4174
4175 /* If journaled and we only have one AVH to track */
4176 if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4177 journal_modify_block_end (hfsmp->jnl, alt_bp, NULL, NULL);
4178 } else {
4179 /*
4180 * If we don't have a journal or there are two AVH's at the
4181 * moment, then this one doesn't go in the journal. Note that
4182 * this one may generate I/O errors, since the partition
4183 * can be resized behind our backs at any moment and this I/O
4184 * may now appear to be beyond the device EOF.
4185 */
4186 (void) VNOP_BWRITE(alt_bp);
4187 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
4188 }
4189 } else if (alt_bp) {
4190 buf_brelse(alt_bp);
4191 }
4192 }
4193 }
4194
4195 /* Finish modifying the block for the primary VH */
4196 if (hfsmp->jnl) {
4197 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
4198 } else {
4199 if (!ISSET(options, HFS_FVH_WAIT)) {
4200 buf_bawrite(bp);
4201 } else {
4202 retval = VNOP_BWRITE(bp);
4203 /* When critical data changes, flush the device cache */
4204 if (critical && (retval == 0)) {
4205 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
4206 }
4207 }
4208 }
4209 hfs_end_transaction(hfsmp);
4210
4211 return (retval);
4212
4213 err_exit:
4214 if (alt_bp)
4215 buf_brelse(alt_bp);
4216 if (bp)
4217 buf_brelse(bp);
4218 hfs_end_transaction(hfsmp);
4219 return retval;
4220 }
4221
4222
4223 /*
4224 * Creates a UUID from a unique "name" in the HFS UUID Name space.
4225 * See version 3 UUID.
4226 */
4227 void
4228 hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result_uuid)
4229 {
4230
4231 if (uuid_is_null(hfsmp->hfs_full_uuid)) {
4232 uuid_t result;
4233
4234 MD5_CTX md5c;
4235 uint8_t rawUUID[8];
4236
4237 ((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6];
4238 ((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7];
4239
4240 MD5Init( &md5c );
4241 MD5Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) );
4242 MD5Update( &md5c, rawUUID, sizeof (rawUUID) );
4243 MD5Final( result, &md5c );
4244
4245 result[6] = 0x30 | ( result[6] & 0x0F );
4246 result[8] = 0x80 | ( result[8] & 0x3F );
4247
4248 uuid_copy(hfsmp->hfs_full_uuid, result);
4249 }
4250 uuid_copy (result_uuid, hfsmp->hfs_full_uuid);
4251
4252 }
4253
/*
 * Get file system attributes.
 *
 * VFS_GETATTR entry point: fills in the fields of 'fsap' and marks each
 * returned field as supported via VFSATTR_RETURN/VFSATTR_SET_SUPPORTED.
 * Free-space attributes are skipped for sparse-device-backed volumes
 * unless explicitly requested, since they can require I/O to the backing
 * store (see comment below).  Always returns 0.
 */
static int
hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
{
/* File attributes we never report: filetype, fork count/list, clump size. */
#define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST | ATTR_FILE_CLUMPSIZE))
/* Data-protect flags are added back below only when CONFIG_PROTECT is on. */
#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_DATA_PROTECT_FLAGS))

	ExtendedVCB *vcb = VFSTOVCB(mp);
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/* Runtime availability of the searchfs/exchangedata interfaces. */
	int searchfs_on = 0;
	int exchangedata_on = 1;

#if CONFIG_SEARCHFS
	searchfs_on = 1;
#endif

#if CONFIG_PROTECT
	/* exchangedata is not offered on content-protected volumes. */
	if (cp_fs_protected(mp)) {
		exchangedata_on = 0;
	}
#endif

	/*
	 * Some of these attributes can be expensive to query if we're
	 * backed by a disk image; hfs_freeblks() has to ask the backing
	 * store, and this might involve a trip to a network file server.
	 * Only ask for them if the caller really wants them. Preserve old
	 * behavior for file systems not backed by a disk image.
	 */
#if HFS_SPARSE_DEV
	const int diskimage = (hfsmp->hfs_backingvp != NULL);
#else
	const int diskimage = 0;
#endif

	/* Counts and sizes straight out of the in-core VCB / mount. */
	VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
	VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
	VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
	VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
	/* Potentially-expensive free-space queries: only when wanted or not a disk image. */
	if (VFSATTR_WANTED(fsap, f_bfree) || !diskimage) {
		VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
	}
	if (VFSATTR_WANTED(fsap, f_bavail) || !diskimage) {
		VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
	}
	VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
	/* XXX needs clarification */
	if (VFSATTR_WANTED(fsap, f_bused) || !diskimage) {
		VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
	}
	VFSATTR_RETURN(fsap, f_files, (u_int64_t)HFS_MAX_FILES);
	VFSATTR_RETURN(fsap, f_ffree, (u_int64_t)hfs_free_cnids(hfsmp));

	fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
	fsap->f_fsid.val[1] = vfs_typenum(mp);
	VFSATTR_SET_SUPPORTED(fsap, f_fsid);

	VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
	VFSATTR_RETURN(fsap, f_carbon_fsid, 0);

	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
		vol_capabilities_attr_t *cap;

		cap = &fsap->f_capabilities;

		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			/* HFS+ & variants */
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_SYMBOLICLINKS |
				VOL_CAP_FMT_HARDLINKS |
				VOL_CAP_FMT_JOURNAL |
				VOL_CAP_FMT_ZERO_RUNS |
				(hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
				(hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_2TB_FILESIZE |
				VOL_CAP_FMT_HIDDEN_FILES |
#if HFS_COMPRESSION
				VOL_CAP_FMT_DECMPFS_COMPRESSION |
#endif
#if CONFIG_HFS_DIRLINK
				VOL_CAP_FMT_DIR_HARDLINKS |
#endif
#ifdef VOL_CAP_FMT_DOCUMENT_ID
				VOL_CAP_FMT_DOCUMENT_ID |
#endif /* VOL_CAP_FMT_DOCUMENT_ID */
#ifdef VOL_CAP_FMT_WRITE_GENERATION_COUNT
				VOL_CAP_FMT_WRITE_GENERATION_COUNT |
#endif /* VOL_CAP_FMT_WRITE_GENERATION_COUNT */
				VOL_CAP_FMT_PATH_FROM_ID;
		}
#if CONFIG_HFS_STD
		else {
			/* HFS standard */
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_HIDDEN_FILES |
				VOL_CAP_FMT_PATH_FROM_ID;
		}
#endif

		/*
		 * The capabilities word in 'cap' tell you whether or not
		 * this particular filesystem instance has feature X enabled.
		 */

		cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
			VOL_CAP_INT_FLOCK |
#if VOL_CAP_INT_RENAME_EXCL
			VOL_CAP_INT_RENAME_EXCL |
#endif
#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS may conditionally support searchfs and exchangedata depending on the runtime */

		if (searchfs_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_SEARCHFS;
		}
		if (exchangedata_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXCHANGEDATA;
		}

		cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;

		/* The on-disk format's feature support (instance-independent). */
		cap->valid[VOL_CAPABILITIES_FORMAT] =
			VOL_CAP_FMT_PERSISTENTOBJECTIDS |
			VOL_CAP_FMT_SYMBOLICLINKS |
			VOL_CAP_FMT_HARDLINKS |
			VOL_CAP_FMT_JOURNAL |
			VOL_CAP_FMT_JOURNAL_ACTIVE |
			VOL_CAP_FMT_NO_ROOT_TIMES |
			VOL_CAP_FMT_SPARSE_FILES |
			VOL_CAP_FMT_ZERO_RUNS |
			VOL_CAP_FMT_CASE_SENSITIVE |
			VOL_CAP_FMT_CASE_PRESERVING |
			VOL_CAP_FMT_FAST_STATFS |
			VOL_CAP_FMT_2TB_FILESIZE |
			VOL_CAP_FMT_OPENDENYMODES |
			VOL_CAP_FMT_HIDDEN_FILES |
			VOL_CAP_FMT_PATH_FROM_ID |
			VOL_CAP_FMT_DECMPFS_COMPRESSION |
#ifdef VOL_CAP_FMT_DOCUMENT_ID
			VOL_CAP_FMT_DOCUMENT_ID |
#endif /* VOL_CAP_FMT_DOCUMENT_ID */
#ifdef VOL_CAP_FMT_WRITE_GENERATION_COUNT
			VOL_CAP_FMT_WRITE_GENERATION_COUNT |
#endif /* VOL_CAP_FMT_WRITE_GENERATION_COUNT */
			VOL_CAP_FMT_DIR_HARDLINKS;

		/*
		 * Bits in the "valid" field tell you whether or not the on-disk
		 * format supports feature X.
		 */

		cap->valid[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_COPYFILE |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
			VOL_CAP_INT_FLOCK |
			VOL_CAP_INT_MANLOCK |
#if VOL_CAP_INT_RENAME_EXCL
			VOL_CAP_INT_RENAME_EXCL |
#endif

#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS always supports exchangedata and searchfs in the on-disk format natively */
		cap->valid[VOL_CAPABILITIES_INTERFACES] |= (VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_EXCHANGEDATA);


		cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
		vol_attributes_attr_t *attrp = &fsap->f_attributes;

		attrp->validattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
#if CONFIG_PROTECT
		attrp->validattr.commonattr |= ATTR_CMN_DATA_PROTECT_FLAGS;
#endif // CONFIG_PROTECT

		attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->validattr.forkattr = 0;

		attrp->nativeattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
#if CONFIG_PROTECT
		attrp->nativeattr.commonattr |= ATTR_CMN_DATA_PROTECT_FLAGS;
#endif // CONFIG_PROTECT

		attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->nativeattr.forkattr = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
	}
	/* Volume timestamps (create date is kept in local time on HFS). */
	fsap->f_create_time.tv_sec = hfsmp->hfs_itime;
	fsap->f_create_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_create_time);
	fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
	fsap->f_modify_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_modify_time);
	// We really don't have volume access time, they should check the root node, fake it up
	if (VFSATTR_IS_ACTIVE(fsap, f_access_time)) {
		struct timeval tv;

		microtime(&tv);
		fsap->f_access_time.tv_sec = tv.tv_sec;
		fsap->f_access_time.tv_nsec = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_access_time);
	}

	fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
	fsap->f_backup_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_backup_time);

	if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
		u_int16_t subtype = 0;

		/*
		 * Subtypes (flavors) for HFS
		 *   0:   Mac OS Extended
		 *   1:   Mac OS Extended (Journaled)
		 *   2:   Mac OS Extended (Case Sensitive)
		 *   3:   Mac OS Extended (Case Sensitive, Journaled)
		 *   4 - 127:   Reserved
		 *   128: Mac OS Standard
		 *
		 */
		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			if (hfsmp->jnl) {
				subtype |= HFS_SUBTYPE_JOURNALED;
			}
			if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
				subtype |= HFS_SUBTYPE_CASESENSITIVE;
			}
		}
#if CONFIG_HFS_STD
		else {
			subtype = HFS_SUBTYPE_STANDARDHFS;
		}
#endif
		fsap->f_fssubtype = subtype;
		VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
	}

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_uuid)) {
		hfs_getvoluuid(hfsmp, fsap->f_uuid);
		VFSATTR_SET_SUPPORTED(fsap, f_uuid);
	}
	return (0);
}
4542
/*
 * Perform a volume rename. Requires the FS' root vp.
 *
 * Renames the root folder in the catalog and updates the in-core VCB
 * name, then flushes the volume header.  On success the root cnode's
 * catalog descriptor is replaced with the newly-returned one.
 * A zero-length name is silently ignored (returns 0).
 */
static int
hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
{
	ExtendedVCB *vcb = VTOVCB(vp);
	struct cnode *cp = VTOC(vp);
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct cat_desc to_desc;
	struct cat_desc todir_desc;
	struct cat_desc new_desc;
	cat_cookie_t cookie;
	int lockflags;
	int error = 0;
	char converted_volname[256];
	size_t volname_length = 0;
	size_t conv_volname_length = 0;


	/*
	 * Ignore attempts to rename a volume to a zero-length name.
	 */
	if (name[0] == 0)
		return(0);

	bzero(&to_desc, sizeof(to_desc));
	bzero(&todir_desc, sizeof(todir_desc));
	bzero(&new_desc, sizeof(new_desc));
	bzero(&cookie, sizeof(cookie));

	/* Destination directory descriptor: the root folder's parent. */
	todir_desc.cd_parentcnid = kHFSRootParentID;
	todir_desc.cd_cnid = kHFSRootFolderID;
	todir_desc.cd_flags = CD_ISDIR;

	/* Destination descriptor: the same root folder CNID with the new name. */
	to_desc.cd_nameptr = (const u_int8_t *)name;
	to_desc.cd_namelen = strlen(name);
	to_desc.cd_parentcnid = kHFSRootParentID;
	to_desc.cd_cnid = cp->c_cnid;
	to_desc.cd_flags = CD_ISDIR;

	/* Lock order: cnode lock, then transaction, then catalog file lock. */
	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
		if ((error = hfs_start_transaction(hfsmp)) == 0) {
			if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

				error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);

				/*
				 * If successful, update the name in the VCB, ensure it's terminated.
				 */
				if (error == 0) {
					strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));

					volname_length = strlen ((const char*)vcb->vcbVN);
					/* Send the volume name down to CoreStorage if necessary */
					error = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t*)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED);
					if (error == 0) {
						(void) VNOP_IOCTL (hfsmp->hfs_devvp, _DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current());
					}
					/* The CoreStorage notification is best-effort; never fail the rename for it. */
					error = 0;
				}

				hfs_systemfile_unlock(hfsmp, lockflags);
				cat_postflight(hfsmp, &cookie, p);

				/* NOTE(review): VCB is marked dirty only on error here;
				 * the unconditional flush below persists the header. */
				if (error)
					MarkVCBDirty(vcb);
				(void) hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			}
			hfs_end_transaction(hfsmp);
		}
		if (!error) {
			/* Release old allocated name buffer */
			if (cp->c_desc.cd_flags & CD_HASBUF) {
				const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;

				cp->c_desc.cd_nameptr = 0;
				cp->c_desc.cd_namelen = 0;
				cp->c_desc.cd_flags &= ~CD_HASBUF;
				vfs_removename(tmp_name);
			}
			/* Update cnode's catalog descriptor */
			replace_desc(cp, &new_desc);
			vcb->volumeNameEncodingHint = new_desc.cd_encoding;
			cp->c_touch_chgtime = TRUE;
		}

		hfs_unlock(cp);
	}

	return(error);
}
4636
4637 /*
4638 * Get file system attributes.
4639 */
4640 static int
4641 hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, vfs_context_t context)
4642 {
4643 kauth_cred_t cred = vfs_context_ucred(context);
4644 int error = 0;
4645
4646 /*
4647 * Must be superuser or owner of filesystem to change volume attributes
4648 */
4649 if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
4650 return(EACCES);
4651
4652 if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
4653 vnode_t root_vp;
4654
4655 error = hfs_vfs_root(mp, &root_vp, context);
4656 if (error)
4657 goto out;
4658
4659 error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
4660 (void) vnode_put(root_vp);
4661 if (error)
4662 goto out;
4663
4664 VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
4665 }
4666
4667 out:
4668 return error;
4669 }
4670
4671 /* If a runtime corruption is detected, set the volume inconsistent
4672 * bit in the volume attributes. The volume inconsistent bit is a persistent
4673 * bit which represents that the volume is corrupt and needs repair.
4674 * The volume inconsistent bit can be set from the kernel when it detects
4675 * runtime corruption or from file system repair utilities like fsck_hfs when
4676 * a repair operation fails. The bit should be cleared only from file system
4677 * verify/repair utility like fsck_hfs when a verify/repair succeeds.
4678 */
4679 void hfs_mark_inconsistent(struct hfsmount *hfsmp,
4680 hfs_inconsistency_reason_t reason)
4681 {
4682 hfs_lock_mount (hfsmp);
4683 if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
4684 hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
4685 MarkVCBDirty(hfsmp);
4686 }
4687 if ((hfsmp->hfs_flags & HFS_READ_ONLY)==0) {
4688 switch (reason) {
4689 case HFS_INCONSISTENCY_DETECTED:
4690 printf("hfs_mark_inconsistent: Runtime corruption detected on %s, fsck will be forced on next mount.\n",
4691 hfsmp->vcbVN);
4692 break;
4693 case HFS_ROLLBACK_FAILED:
4694 printf("hfs_mark_inconsistent: Failed to roll back; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
4695 hfsmp->vcbVN);
4696 break;
4697 case HFS_OP_INCOMPLETE:
4698 printf("hfs_mark_inconsistent: Failed to complete operation; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
4699 hfsmp->vcbVN);
4700 break;
4701 case HFS_FSCK_FORCED:
4702 printf("hfs_mark_inconsistent: fsck requested for `%s'; fsck will be forced on next mount.\n",
4703 hfsmp->vcbVN);
4704 break;
4705 }
4706 }
4707 hfs_unlock_mount (hfsmp);
4708 }
4709
4710 /* Replay the journal on the device node provided. Returns zero if
4711 * journal replay succeeded or no journal was supposed to be replayed.
4712 */
4713 static int hfs_journal_replay(vnode_t devvp, vfs_context_t context)
4714 {
4715 int retval = 0;
4716 int error = 0;
4717
4718 /* Replay allowed only on raw devices */
4719 if (!vnode_ischr(devvp) && !vnode_isblk(devvp))
4720 return EINVAL;
4721
4722 retval = hfs_mountfs(devvp, NULL, NULL, /* journal_replay_only: */ 1, context);
4723 buf_flushdirtyblks(devvp, TRUE, 0, "hfs_journal_replay");
4724
4725 /* FSYNC the devnode to be sure all data has been flushed */
4726 error = VNOP_FSYNC(devvp, MNT_WAIT, context);
4727 if (error) {
4728 retval = error;
4729 }
4730
4731 return retval;
4732 }
4733
4734
/*
 * Cancel the syncer
 *
 * Clears HFS_RUN_SYNCER and waits for the syncer thread (if any) to go
 * away.  A NULL hfsmp, or one whose syncer was never started, is a no-op.
 */
static void
hfs_syncer_free(struct hfsmount *hfsmp)
{
	if (hfsmp && ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)) {
		/* Clear the run flag under the syncer lock. */
		hfs_syncer_lock(hfsmp);
		CLR(hfsmp->hfs_flags, HFS_RUN_SYNCER);
		hfs_syncer_unlock(hfsmp);

		// Wait for the syncer thread to finish
		if (hfsmp->hfs_syncer_thread) {
			hfs_syncer_wakeup(hfsmp);
			hfs_syncer_lock(hfsmp);
			/* Loop until hfs_syncer_thread goes NULL — presumably
			 * cleared by the syncer thread itself as it exits. */
			while (hfsmp->hfs_syncer_thread)
				hfs_syncer_wait(hfsmp, NULL);
			hfs_syncer_unlock(hfsmp);
		}
	}
}
4756
4757 static int hfs_vfs_ioctl(struct mount *mp, u_long command, caddr_t data,
4758 __unused int flags, __unused vfs_context_t context)
4759 {
4760 switch (command) {
4761 #if CONFIG_PROTECT
4762 case FIODEVICELOCKED:
4763 cp_device_locked_callback(mp, (cp_lock_state_t)data);
4764 return 0;
4765 #endif
4766 }
4767 return ENOTTY;
4768 }
4769
/*
 * hfs vfs operations.
 *
 * Dispatch table registered with the VFS layer; members not listed
 * here are zero-initialized (NULL) by the designated initializer.
 */
const struct vfsops hfs_vfsops = {
	.vfs_mount = hfs_mount,
	.vfs_start = hfs_start,
	.vfs_unmount = hfs_unmount,
	.vfs_root = hfs_vfs_root,
	.vfs_quotactl = hfs_quotactl,
	.vfs_getattr = hfs_vfs_getattr,
	.vfs_sync = hfs_sync,
	.vfs_vget = hfs_vfs_vget,
	.vfs_fhtovp = hfs_fhtovp,
	.vfs_vptofh = hfs_vptofh,
	.vfs_init = hfs_init,
	.vfs_sysctl = hfs_sysctl,
	.vfs_setattr = hfs_vfs_setattr,
	.vfs_ioctl = hfs_vfs_ioctl,
};