]> git.saurik.com Git - apple/hfs.git/blob - core/hfs_vfsops.c
hfs-366.1.1.tar.gz
[apple/hfs.git] / core / hfs_vfsops.c
1 /*
2 * Copyright (c) 1999-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1991, 1993, 1994
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * hfs_vfsops.c
66 * derived from @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
67 *
68 * (c) Copyright 1997-2002 Apple Inc. All rights reserved.
69 *
70 * hfs_vfsops.c -- VFS layer for loadable HFS file system.
71 *
72 */
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kauth.h>
76
77 #include <sys/ubc.h>
78 #include <sys/sysctl.h>
79 #include <sys/malloc.h>
80 #include <sys/stat.h>
81 #include <sys/quota.h>
82 #include <sys/disk.h>
83 #include <sys/paths.h>
84 #include <sys/utfconv.h>
85 #include <sys/kdebug.h>
86 #include <sys/fslog.h>
87 #include <sys/ubc.h>
88
89 /* for parsing boot-args */
90 #include <pexpert/pexpert.h>
91
92
93 #include <kern/locks.h>
94
95 #include "hfs_journal.h"
96
97 #include <miscfs/specfs/specdev.h>
98 #include "hfs_mount.h"
99
100 #include <libkern/crypto/md5.h>
101 #include <uuid/uuid.h>
102
103 #include "hfs_iokit.h"
104 #include "hfs.h"
105 #include "hfs_catalog.h"
106 #include "hfs_cnode.h"
107 #include "hfs_dbg.h"
108 #include "hfs_endian.h"
109 #include "hfs_hotfiles.h"
110 #include "hfs_quota.h"
111 #include "hfs_btreeio.h"
112 #include "hfs_kdebug.h"
113 #include "hfs_cprotect.h"
114
115 #include "FileMgrInternal.h"
116 #include "BTreesInternal.h"
117
118 #define HFS_MOUNT_DEBUG 1
119
120 /* Enable/disable debugging code for live volume resizing, defined in hfs_resize.c */
121 extern int hfs_resize_debug;
122
123 lck_grp_attr_t * hfs_group_attr;
124 lck_attr_t * hfs_lock_attr;
125 lck_grp_t * hfs_mutex_group;
126 lck_grp_t * hfs_rwlock_group;
127 lck_grp_t * hfs_spinlock_group;
128
129 extern struct vnodeopv_desc hfs_vnodeop_opv_desc;
130
131 #if CONFIG_HFS_STD
132 extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc;
133 static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);
134 #endif
135
136 /* not static so we can re-use in hfs_readwrite.c for build_path calls */
137 int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);
138
139 static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
140 static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
141 static int hfs_flushfiles(struct mount *, int, struct proc *);
142 static int hfs_init(struct vfsconf *vfsp);
143 static void hfs_locks_destroy(struct hfsmount *hfsmp);
144 static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
145 static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
146 static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
147 static void hfs_syncer_free(struct hfsmount *hfsmp);
148
149 void hfs_initialize_allocator (struct hfsmount *hfsmp);
150 int hfs_teardown_allocator (struct hfsmount *hfsmp);
151
152 int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context);
153 int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context);
154 int hfs_reload(struct mount *mp);
155 int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
156 int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
157 int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
158 user_addr_t newp, size_t newlen, vfs_context_t context);
159 int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);
160
161 static int hfs_journal_replay(vnode_t devvp, vfs_context_t context);
162
163 #if HFS_LEAK_DEBUG
164
165 #include <libkern/OSAtomic.h>
166 #include <IOKit/IOLib.h>
167
168 int hfs_active_mounts;
169
170 #endif
171
172 /*
173 * VFS Operations.
174 *
175 * mount system call
176 */
177
/*
 * hfs_mount: VFS mount entry point for HFS/HFS+.
 *
 * Handles three cases, keyed off the command flags on `mp`:
 *   1. MNT_UPDATE | MNT_RELOAD — re-read all in-core metadata (allowed on
 *      read-only mounts only; used after fsck repairs the root volume).
 *   2. MNT_UPDATE — downgrade read-write -> read-only (sync, flush files,
 *      mark clean, close the journal) or upgrade read-only -> read-write
 *      (re-open the journal, erase unused catalog nodes, mark dirty),
 *      then apply any changed mount arguments via hfs_changefs().
 *   3. Fresh mount — hfs_mountfs(); a zero `data` pointer indicates the
 *      root mount, which gets default owner/masks and a free-block reserve.
 *
 * `data` is a user-space pointer to a struct hfs_mount_args (may be 0 for
 * the root mount).  Returns 0 on success or a BSD errno.
 */
int
hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
{

#if HFS_LEAK_DEBUG

#warning HFS_LEAK_DEBUG is on

	hfs_alloc_trace_enable();

#endif

	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp = NULL;
	struct hfs_mount_args args;
	int retval = E_NONE;
	u_int32_t cmdflags;

	/* Copy mount arguments in from user space, if supplied. */
	if (data && (retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
		if (HFS_MOUNT_DEBUG) {
			printf("hfs_mount: copyin returned %d for fs\n", retval);
		}
		return (retval);
	}
	cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
	if (cmdflags & MNT_UPDATE) {
		hfs_assert(data);

		hfsmp = VFSTOHFS(mp);

		/* Reload incore data after an fsck. */
		if (cmdflags & MNT_RELOAD) {
			if (vfs_isrdonly(mp)) {
				int error = hfs_reload(mp);
				if (error && HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_reload returned %d on %s \n", error, hfsmp->vcbVN);
				}
				return error;
			}
			else {
				/* Reload is only meaningful (and safe) read-only. */
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp->vcbVN);
				}
				return (EINVAL);
			}
		}

		/* Change to a read-only file system. */
		if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
		    vfs_isrdonly(mp)) {
			int flags;

			/* Set flag to indicate that a downgrade to read-only
			 * is in progress and therefore block any further
			 * modifications to the file system.
			 */
			hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
			hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
			hfsmp->hfs_downgrading_thread = current_thread();
			hfs_unlock_global (hfsmp);
			hfs_syncer_free(hfsmp);

			/* use hfs_sync to push out System (btree) files */
			retval = hfs_sync(mp, MNT_WAIT, context);
			if (retval && ((cmdflags & MNT_FORCE) == 0)) {
				/* Sync failed and not forced: abort the downgrade. */
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			flags = WRITECLOSE;
			if (cmdflags & MNT_FORCE)
				flags |= FORCECLOSE;

			if ((retval = hfs_flushfiles(mp, flags, p))) {
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			/* mark the volume cleanly unmounted */
			hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
			retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			hfsmp->hfs_flags |= HFS_READ_ONLY;

			/*
			 * Close down the journal.
			 *
			 * NOTE: It is critically important to close down the journal
			 * and have it issue all pending I/O prior to calling VNOP_FSYNC below.
			 * In a journaled environment it is expected that the journal be
			 * the only actor permitted to issue I/O for metadata blocks in HFS.
			 * If we were to call VNOP_FSYNC prior to closing down the journal,
			 * we would inadvertantly issue (and wait for) the I/O we just
			 * initiated above as part of the flushvolumeheader call.
			 *
			 * To avoid this, we follow the same order of operations as in
			 * unmount and issue the journal_close prior to calling VNOP_FSYNC.
			 */

			if (hfsmp->jnl) {
				hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);

				journal_close(hfsmp->jnl);
				hfsmp->jnl = NULL;

				// Note: we explicitly don't want to shutdown
				// access to the jvp because we may need
				// it later if we go back to being read-write.

				hfs_unlock_global (hfsmp);

				vfs_clearflags(hfsmp->hfs_mp, MNT_JOURNALED);
			}

			/*
			 * Write out any pending I/O still outstanding against the device node
			 * now that the journal has been closed.
			 */
			if (retval == 0) {
				vnode_get(hfsmp->hfs_devvp);
				retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
				vnode_put(hfsmp->hfs_devvp);
			}

			if (retval) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval, hfsmp->vcbVN);
				}
				/* Undo the downgrade state set above. */
				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
				hfsmp->hfs_downgrading_thread = NULL;
				hfsmp->hfs_flags &= ~HFS_READ_ONLY;
				goto out;
			}

			/* Release the in-memory summary table; it is rebuilt on demand. */
			if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
				if (hfsmp->hfs_summary_table) {
					int err = 0;
					/*
					 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
					 */
					if (hfsmp->hfs_allocation_vp) {
						err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
					}
					hfs_free(hfsmp->hfs_summary_table, hfsmp->hfs_summary_bytes);
					hfsmp->hfs_summary_table = NULL;
					hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;
					if (err == 0 && hfsmp->hfs_allocation_vp){
						hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
					}
				}
			}

			hfsmp->hfs_downgrading_thread = NULL;
		}

		/* Change to a writable file system. */
		if (vfs_iswriteupgrade(mp)) {
			/*
			 * On inconsistent disks, do not allow read-write mount
			 * unless it is the boot volume being mounted.
			 */
			if (!(vfs_flags(mp) & MNT_ROOTFS) &&
					(hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n", (hfsmp->vcbVN));
				}
				retval = EINVAL;
				goto out;
			}

			// If the journal was shut-down previously because we were
			// asked to be read-only, let's start it back up again now

			if (   (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
			    && hfsmp->jnl == NULL
			    && hfsmp->jvp != NULL) {
				int jflags;

				if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
					jflags = JOURNAL_RESET;
				} else {
					jflags = 0;
				}

				hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);

				/* We provide the mount point twice here: The first is used as
				 * an opaque argument to be passed back when hfs_sync_metadata
				 * is called. The second is provided to the throttling code to
				 * indicate which mount's device should be used when accounting
				 * for metadata writes.
				 */
				hfsmp->jnl = journal_open(hfsmp->jvp,
						hfs_blk_to_bytes(hfsmp->jnl_start, HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
						hfsmp->jnl_size,
						hfsmp->hfs_devvp,
						hfsmp->hfs_logical_block_size,
						jflags,
						0,
						hfs_sync_metadata, hfsmp->hfs_mp,
						hfsmp->hfs_mp);

				/*
				 * Set up the trim callback function so that we can add
				 * recently freed extents to the free extent cache once
				 * the transaction that freed them is written to the
				 * journal on disk.
				 */
				if (hfsmp->jnl)
					journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);

				hfs_unlock_global (hfsmp);

				if (hfsmp->jnl == NULL) {
					if (HFS_MOUNT_DEBUG) {
						printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp->vcbVN));
					}
					retval = EINVAL;
					goto out;
				} else {
					hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
					vfs_setflags(hfsmp->hfs_mp, MNT_JOURNALED);
				}
			}

			/* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
			retval = hfs_erase_unused_nodes(hfsmp);
			if (retval != E_NONE) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			/* If this mount point was downgraded from read-write
			 * to read-only, clear that information as we are now
			 * moving back to read-write.
			 */
			hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
			hfsmp->hfs_downgrading_thread = NULL;

			/* mark the volume dirty (clear clean unmount bit) */
			hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;

			retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			if (retval != E_NONE) {
				if (HFS_MOUNT_DEBUG) {
					printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval, hfsmp->vcbVN);
				}
				goto out;
			}

			/* Only clear HFS_READ_ONLY after a successful write */
			hfsmp->hfs_flags &= ~HFS_READ_ONLY;


			if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
				/* Setup private/hidden directories for hardlinks. */
				hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
				hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

				hfs_remove_orphans(hfsmp);

				/*
				 * Since we're upgrading to a read-write mount, allow
				 * hot file clustering if conditions allow.
				 *
				 * Note: this normally only would happen if you booted
				 *       single-user and upgraded the mount to read-write
				 *
				 * Note: at this point we are not allowed to fail the
				 *       mount operation because the HotFile init code
				 *       in hfs_recording_init() will lookup vnodes with
				 *       VNOP_LOOKUP() which hangs vnodes off the mount
				 *       (and if we were to fail, VFS is not prepared to
				 *       clean that up at this point.  Since HotFiles are
				 *       optional, this is not a big deal.
				 */
				if (ISSET(hfsmp->hfs_flags, HFS_METADATA_ZONE)
					&& (!ISSET(hfsmp->hfs_flags, HFS_SSD)
						|| ISSET(hfsmp->hfs_flags, HFS_CS_HOTFILE_PIN))) {
					hfs_recording_init(hfsmp);
				}
				/* Force ACLs on HFS+ file systems. */
				if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
					vfs_setextendedsecurity(HFSTOVFS(hfsmp));
				}
			}
		}

		/* Update file system parameters. */
		retval = hfs_changefs(mp, &args);
		if (retval && HFS_MOUNT_DEBUG) {
			printf("hfs_mount: hfs_changefs returned %d for %s\n", retval, hfsmp->vcbVN);
		}

	} else /* not an update request */ {
		/* Set the mount flag to indicate that we support volfs  */
		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));

		retval = hfs_mountfs(devvp, mp, data ? &args : NULL, 0, context);
		if (retval) {
			const char *name = vnode_getname(devvp);
			printf("hfs_mount: hfs_mountfs returned error=%d for device %s\n", retval, (name ? name : "unknown-dev"));
			if (name) {
				vnode_putname(name);
			}
			goto out;
		}

		/* After hfs_mountfs succeeds, we should have valid hfsmp */
		hfsmp = VFSTOHFS(mp);

		/* Set up the maximum defrag file size */
		hfsmp->hfs_defrag_max = HFS_INITIAL_DEFRAG_SIZE;


		if (!data) {
			// Root mount

			hfsmp->hfs_uid = UNKNOWNUID;
			hfsmp->hfs_gid = UNKNOWNGID;
			hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
			hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */

			/* Establish the free block reserve. */
			hfsmp->reserveBlocks = ((u_int64_t)hfsmp->totalBlocks * HFS_MINFREE) / 100;
			hfsmp->reserveBlocks = MIN(hfsmp->reserveBlocks, HFS_MAXRESERVE / hfsmp->blockSize);
		}

#if HFS_LEAK_DEBUG
		OSIncrementAtomic(&hfs_active_mounts);
#endif
	}

out:
	if (retval == 0) {
		(void)hfs_statfs(mp, vfs_statfs(mp), context);
	}
	return (retval);
}
526
527
/*
 * Context handed to hfs_changefs_callback() via vnode_iterate(); filled in
 * by hfs_changefs() once it knows which mount parameters changed.
 */
struct hfs_changefs_cargs {
	struct hfsmount *hfsmp;		/* mount whose parameters changed */
	int		namefix;	/* nonzero: name encoding changed; refresh cached names */
	int		permfix;	/* nonzero: default uid/gid/mask changed */
	int		permswitch;	/* nonzero: MNT_UNKNOWNPERMISSIONS was toggled */
};
534
/*
 * Per-vnode callback invoked by vnode_iterate() on behalf of hfs_changefs().
 *
 * Re-reads the cnode's catalog record so that in-core ownership/permissions
 * and (if the encoding changed) the cached name match the new mount
 * parameters.  Always returns VNODE_RETURNED so iteration continues; a
 * failed catalog lookup just skips the vnode.
 */
static int
hfs_changefs_callback(struct vnode *vp, void *cargs)
{
	ExtendedVCB *vcb;
	struct cnode *cp;
	struct cat_desc cndesc;
	struct cat_attr cnattr;
	struct hfs_changefs_cargs *args;
	int lockflags;
	int error;

	args = (struct hfs_changefs_cargs *)cargs;

	cp = VTOC(vp);
	vcb = HFSTOVCB(args->hfsmp);

	/* Fetch the current on-disk record under a shared catalog lock. */
	lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
	error = cat_lookup(args->hfsmp, &cp->c_desc, 0, 0, &cndesc, &cnattr, NULL, NULL);
	hfs_systemfile_unlock(args->hfsmp, lockflags);
	if (error) {
		/*
		 * If we couldn't find this guy skip to the next one
		 */
		if (args->namefix)
			cache_purge(vp);

		return (VNODE_RETURNED);
	}
	/*
	 * Get the real uid/gid and perm mask from disk.
	 */
	if (args->permswitch || args->permfix) {
		cp->c_uid = cnattr.ca_uid;
		cp->c_gid = cnattr.ca_gid;
		cp->c_mode = cnattr.ca_mode;
	}
	/*
	 * If we're switching name converters then...
	 *   Remove the existing entry from the namei cache.
	 *   Update name to one based on new encoder.
	 */
	if (args->namefix) {
		cache_purge(vp);
		replace_desc(cp, &cndesc);

		/* Keep the volume name in the VCB in sync with the root folder. */
		if (cndesc.cd_cnid == kHFSRootFolderID) {
			strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
			cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;
		}
	} else {
		cat_releasedesc(&cndesc);
	}
	return (VNODE_RETURNED);
}
589
/*
 * Change fs mount parameters.
 *
 * Applies the user-supplied hfs_mount_args to an already-mounted volume:
 * unknown-permissions mode, timezone, default uid/gid/mask, and (HFS
 * standard only) the text encoding.  If any of those changed in a way that
 * affects cached vnode state, every active vnode is visited via
 * hfs_changefs_callback() to bring it up to date.  Returns 0 or an errno.
 */
static int
hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
{
	int retval = 0;
	int namefix, permfix, permswitch;
	struct hfsmount *hfsmp;
	ExtendedVCB *vcb;
	struct hfs_changefs_cargs cargs;
	u_int32_t mount_flags;

#if CONFIG_HFS_STD
	u_int32_t old_encoding = 0;
	hfs_to_unicode_func_t get_unicode_func;
	unicode_to_hfs_func_t get_hfsname_func = NULL;
#endif

	hfsmp = VFSTOHFS(mp);
	vcb = HFSTOVCB(hfsmp);
	mount_flags = (unsigned int)vfs_flags(mp);

	/* Block concurrent changefs operations; cleared at `exit`. */
	hfsmp->hfs_flags |= HFS_IN_CHANGEFS;

	/* True when MNT_UNKNOWNPERMISSIONS is being toggled either direction. */
	permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
	               ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
	              (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
	               (mount_flags & MNT_UNKNOWNPERMISSIONS)));

	/* The root filesystem must operate with actual permissions: */
	if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
		vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS));	/* Just say "No". */
		retval = EINVAL;
		goto exit;
	}
	if (mount_flags & MNT_UNKNOWNPERMISSIONS)
		hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
	else
		hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;

	namefix = permfix = 0;

	/*
	 * Tracking of hot files requires up-to-date access times. So if
	 * access time updates are disabled, we must also disable hot files.
	 */
	if (mount_flags & MNT_NOATIME) {
		(void) hfs_recording_suspend(hfsmp);
	}

	/* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
	if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
		gTimeZone = args->hfs_timezone;
	}

	/* Change the default uid, gid and/or mask */
	if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
		hfsmp->hfs_uid = args->hfs_uid;
		if (vcb->vcbSigWord == kHFSPlusSigWord)
			++permfix;
	}
	if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
		hfsmp->hfs_gid = args->hfs_gid;
		if (vcb->vcbSigWord == kHFSPlusSigWord)
			++permfix;
	}
	if (args->hfs_mask != (mode_t)VNOVAL) {
		if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
			hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
			hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
			if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
				hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
			if (vcb->vcbSigWord == kHFSPlusSigWord)
				++permfix;
		}
	}

#if CONFIG_HFS_STD
	/* Change the hfs encoding value (hfs only) */
	if ((vcb->vcbSigWord == kHFSSigWord) &&
	    (args->hfs_encoding != (u_int32_t)VNOVAL) &&
	    (hfsmp->hfs_encoding != args->hfs_encoding)) {

		retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);
		if (retval)
			goto exit;

		/*
		 * Connect the new hfs_get_unicode converter but leave
		 * the old hfs_get_hfsname converter in place so that
		 * we can lookup existing vnodes to get their correctly
		 * encoded names.
		 *
		 * When we're all finished, we can then connect the new
		 * hfs_get_hfsname converter and release our interest
		 * in the old converters.
		 */
		hfsmp->hfs_get_unicode = get_unicode_func;
		old_encoding = hfsmp->hfs_encoding;
		hfsmp->hfs_encoding = args->hfs_encoding;
		++namefix;
	}
#endif

	/* Nothing that affects cached vnode state changed; skip iteration. */
	if (!(namefix || permfix || permswitch))
		goto exit;

	/* XXX 3762912 hack to support HFS filesystem 'owner' */
	if (permfix) {
		vfs_setowner(mp,
		    hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
		    hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);
	}

	/*
	 * For each active vnode fix things that changed
	 *
	 * Note that we can visit a vnode more than once
	 * and we can race with fsync.
	 *
	 * hfs_changefs_callback will be called for each vnode
	 * hung off of this mount point
	 *
	 * The vnode will be properly referenced and unreferenced
	 * around the callback
	 */
	cargs.hfsmp = hfsmp;
	cargs.namefix = namefix;
	cargs.permfix = permfix;
	cargs.permswitch = permswitch;

	vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);

#if CONFIG_HFS_STD
	/*
	 * If we're switching name converters we can now
	 * connect the new hfs_get_hfsname converter and
	 * release our interest in the old converters.
	 */
	if (namefix) {
		/* HFS standard only */
		hfsmp->hfs_get_hfsname = get_hfsname_func;
		vcb->volumeNameEncodingHint = args->hfs_encoding;
		(void) hfs_relconverter(old_encoding);
	}
#endif

exit:
	hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
	return (retval);
}
740
741
/*
 * Context handed to hfs_reload_callback() via vnode_iterate() from
 * hfs_reload().
 */
struct hfs_reload_cargs {
	struct hfsmount *hfsmp;	/* mount being reloaded */
	int		error;	/* first lookup error; stops the iteration */
};
746
/*
 * Per-vnode callback for hfs_reload(): invalidate cached buffers and
 * directory hints, then re-read the cnode's catalog record from disk for
 * regular (non-system, non-resource-fork) files.  On a lookup failure the
 * error is recorded in the shared cargs and VNODE_RETURNED_DONE aborts
 * the iteration.
 */
static int
hfs_reload_callback(struct vnode *vp, void *cargs)
{
	struct cnode *cp;
	struct hfs_reload_cargs *args;
	int lockflags;

	args = (struct hfs_reload_cargs *)cargs;
	/*
	 * flush all the buffers associated with this node
	 */
	(void) buf_invalidateblks(vp, 0, 0, 0);

	cp = VTOC(vp);
	/*
	 * Remove any directory hints
	 */
	if (vnode_isdir(vp))
		hfs_reldirhints(cp, 0);

	/*
	 * Re-read cnode data for all active vnodes (non-metadata files).
	 */
	if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp) && (cp->c_fileid >= kHFSFirstUserCatalogNodeID)) {
		struct cat_fork *datafork;
		struct cat_desc desc;

		datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;

		/* lookup by fileID since name could have changed */
		lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
		args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, 0, &desc, &cp->c_attr, datafork);
		hfs_systemfile_unlock(args->hfsmp, lockflags);
		if (args->error) {
			return (VNODE_RETURNED_DONE);
		}

		/* update cnode's catalog descriptor */
		(void) replace_desc(cp, &desc);
	}
	return (VNODE_RETURNED);
}
789
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	invalidate all cached meta-data.
 *	invalidate all inactive vnodes.
 *	invalidate all cached file data.
 *	re-read volume header from disk.
 *	re-load meta-file info (extents, file size).
 *	re-load B-tree header data.
 *	re-read cnode data for all active vnodes.
 *
 * Returns 0 on success, or an errno (EINVAL for HFS standard, EIO if the
 * re-read volume header fails its sanity check, or any lookup/bread error).
 */
int
hfs_reload(struct mount *mountp)
{
	register struct vnode *devvp;
	struct buf *bp;
	int error, i;
	struct hfsmount *hfsmp;
	struct HFSPlusVolumeHeader *vhp;
	ExtendedVCB *vcb;
	struct filefork *forkp;
	struct cat_desc cndesc;
	struct hfs_reload_cargs args;
	daddr64_t priIDSector;

	hfsmp = VFSTOHFS(mountp);
	vcb = HFSTOVCB(hfsmp);

	if (vcb->vcbSigWord == kHFSSigWord)
		return (EINVAL);	/* rooting from HFS is not supported! */

	/*
	 * Invalidate all cached meta-data.
	 */
	devvp = hfsmp->hfs_devvp;
	if (buf_invalidateblks(devvp, 0, 0, 0))
		panic("hfs_reload: dirty1");

	args.hfsmp = hfsmp;
	args.error = 0;
	/*
	 * hfs_reload_callback will be called for each vnode
	 * hung off of this mount point that can't be recycled...
	 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
	 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
	 * properly referenced and unreferenced around the callback
	 */
	vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);

	if (args.error)
		return (args.error);

	/*
	 * Re-read VolumeHeader from disk.
	 */
	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
			HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));

	error = (int)buf_meta_bread(hfsmp->hfs_devvp,
			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
			hfsmp->hfs_physical_block_size, NOCRED, &bp);
	if (error) {
		if (bp != NULL)
			buf_brelse(bp);
		return (error);
	}

	vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));

	/* Do a quick sanity check */
	if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
	     SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
	    (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
	     SWAP_BE16(vhp->version) != kHFSXVersion) ||
	    SWAP_BE32(vhp->blockSize) != vcb->blockSize) {
		buf_brelse(bp);
		return (EIO);
	}

	/* Copy the (big-endian) on-disk header fields into the in-core VCB. */
	vcb->vcbLsMod = to_bsd_time(SWAP_BE32(vhp->modifyDate));
	vcb->vcbAtrb = SWAP_BE32 (vhp->attributes);
	vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock);
	vcb->vcbClpSiz = SWAP_BE32 (vhp->rsrcClumpSize);
	vcb->vcbNxtCNID = SWAP_BE32 (vhp->nextCatalogID);
	vcb->vcbVolBkUp = to_bsd_time(SWAP_BE32(vhp->backupDate));
	vcb->vcbWrCnt = SWAP_BE32 (vhp->writeCount);
	vcb->vcbFilCnt = SWAP_BE32 (vhp->fileCount);
	vcb->vcbDirCnt = SWAP_BE32 (vhp->folderCount);
	HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
	vcb->totalBlocks = SWAP_BE32 (vhp->totalBlocks);
	vcb->freeBlocks = SWAP_BE32 (vhp->freeBlocks);
	vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap);
	bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
	vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */

	/*
	 * Re-load meta-file vnode data (extent info, file size, etc).
	 */
	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
	}
	forkp->ff_size = SWAP_BE64 (vhp->extentsFile.logicalSize);
	forkp->ff_blocks = SWAP_BE32 (vhp->extentsFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);


	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
	}
	forkp->ff_size = SWAP_BE64 (vhp->catalogFile.logicalSize);
	forkp->ff_blocks = SWAP_BE32 (vhp->catalogFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);

	/* The attributes B-tree is optional; only reload it if present. */
	if (hfsmp->hfs_attribute_vp) {
		forkp = VTOF(hfsmp->hfs_attribute_vp);
		for (i = 0; i < kHFSPlusExtentDensity; i++) {
			forkp->ff_extents[i].startBlock =
				SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
			forkp->ff_extents[i].blockCount =
				SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
		}
		forkp->ff_size = SWAP_BE64 (vhp->attributesFile.logicalSize);
		forkp->ff_blocks = SWAP_BE32 (vhp->attributesFile.totalBlocks);
		forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);
	}

	forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		forkp->ff_extents[i].startBlock =
			SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
		forkp->ff_extents[i].blockCount =
			SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
	}
	forkp->ff_size = SWAP_BE64 (vhp->allocationFile.logicalSize);
	forkp->ff_blocks = SWAP_BE32 (vhp->allocationFile.totalBlocks);
	forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);

	buf_brelse(bp);
	vhp = NULL;

	/*
	 * Re-load B-tree header data
	 */
	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
		return (error);

	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
		return (error);

	if (hfsmp->hfs_attribute_vp) {
		forkp = VTOF(hfsmp->hfs_attribute_vp);
		if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
			return (error);
	}

	/* Reload the volume name */
	if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, NULL, NULL)))
		return (error);
	vcb->volumeNameEncodingHint = cndesc.cd_encoding;
	bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
	cat_releasedesc(&cndesc);

	/* Re-establish private/hidden directories. */
	hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
	hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

	/* In case any volume information changed to trigger a notification */
	hfs_generate_volume_notifications(hfsmp);

	return (0);
}
974
/*
 * Convert a struct timeval into an absolute count of microseconds.
 *
 * The input is read-only, so take it as const — this also matches the
 * const-qualified timeval parameters of hfs_has_elapsed() below.
 * Currently unreferenced (kept for debugging), hence __unused.
 */
__unused
static uint64_t tv_to_usecs(const struct timeval *tv)
{
	return tv->tv_sec * 1000000ULL + tv->tv_usec;
}
980
/*
 * Return true when at least 'usecs' microseconds separate timestamps
 * *a (earlier) and *b (later).
 */
static bool hfs_has_elapsed (const struct timeval *a,
                             const struct timeval *b,
                             uint64_t usecs)
{
	struct timeval elapsed;

	/* Normalized interval from *a to *b. */
	timersub(b, a, &elapsed);

	uint64_t elapsed_usecs = elapsed.tv_sec * 1000000ULL + elapsed.tv_usec;
	return elapsed_usecs >= usecs;
}
990
/*
 * hfs_syncer - per-mount background thread that flushes dirty metadata.
 *
 * 'arg' is the struct hfsmount this thread services; 'wr' is unused.
 * The thread loops while HFS_RUN_SYNCER is set and there is a pending
 * sync request (hfs_sync_req_oldest is non-zero), periodically deciding
 * whether to flush the journal (journaled volumes) or the whole volume.
 * On exit it clears hfs_syncer_thread and wakes anyone waiting on the
 * syncer (presumably hfs_unmount — see the warning near the end).
 */
void hfs_syncer(void *arg, __unused wait_result_t wr)
{
	struct hfsmount *hfsmp = arg;
	struct timeval now;

	KDBG(HFSDBG_SYNCER | DBG_FUNC_START, obfuscate_addr(hfsmp));

	hfs_syncer_lock(hfsmp);

	while (ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)
		   && timerisset(&hfsmp->hfs_sync_req_oldest)) {

		/* Sleep (dropping the syncer lock) for up to HFS_META_DELAY. */
		hfs_syncer_wait(hfsmp, &HFS_META_DELAY_TS);

		/* Re-check the exit conditions after waking: the syncer may
		   have been told to stop, or the pending request may have
		   been cleared, while we slept. */
		if (!ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)
			|| !timerisset(&hfsmp->hfs_sync_req_oldest)) {
			break;
		}

		/* Check to see whether we should flush now: either the oldest
		   is > HFS_MAX_META_DELAY or HFS_META_DELAY has elapsed since
		   the request and there are no pending writes. */

		microuptime(&now);
		uint64_t idle_time = vfs_idle_time(hfsmp->hfs_mp);

		if (!hfs_has_elapsed(&hfsmp->hfs_sync_req_oldest, &now,
							 HFS_MAX_META_DELAY)
			&& idle_time < HFS_META_DELAY) {
			/* Too soon and the device is still busy; keep waiting. */
			continue;
		}

		/* Consume the pending request before flushing; a new request
		   arriving during the flush will re-arm the loop. */
		timerclear(&hfsmp->hfs_sync_req_oldest);

		/* Drop the syncer lock across the (potentially long) flush. */
		hfs_syncer_unlock(hfsmp);

		KDBG(HFSDBG_SYNCER_TIMED | DBG_FUNC_START, obfuscate_addr(hfsmp));

		/*
		 * We intentionally do a synchronous flush (of the journal or entire volume) here.
		 * For journaled volumes, this means we wait until the metadata blocks are written
		 * to both the journal and their final locations (in the B-trees, etc.).
		 *
		 * This tends to avoid interleaving the metadata writes with other writes (for
		 * example, user data, or to the journal when a later transaction notices that
		 * an earlier transaction has finished its async writes, and then updates the
		 * journal start in the journal header).  Avoiding interleaving of writes is
		 * very good for performance on simple flash devices like SD cards, thumb drives;
		 * and on devices like floppies.  Since removable devices tend to be this kind of
		 * simple device, doing a synchronous flush actually improves performance in
		 * practice.
		 *
		 * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
		 * user data to be written.
		 */
		if (hfsmp->jnl) {
			hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_META);
		} else {
			hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_current());
		}

		KDBG(HFSDBG_SYNCER_TIMED | DBG_FUNC_END);

		hfs_syncer_lock(hfsmp);
	} // while (...)

	/* Publish that the syncer is gone, then wake any waiter. */
	hfsmp->hfs_syncer_thread = NULL;
	hfs_syncer_unlock(hfsmp);
	hfs_syncer_wakeup(hfsmp);

	/* BE CAREFUL WHAT YOU ADD HERE: at this point hfs_unmount is free
	   to continue and therefore hfsmp might be invalid. */

	KDBG(HFSDBG_SYNCER | DBG_FUNC_END);
}
1066
1067 /*
1068 * Call into the allocator code and perform a full scan of the bitmap file.
1069 *
1070 * This allows us to TRIM unallocated ranges if needed, and also to build up
1071 * an in-memory summary table of the state of the allocated blocks.
1072 */
/*
 * hfs_scan_blocks - full scan of the allocation bitmap, run in its own
 * thread at mount time.  Holds the bitmap lock for the entire scan;
 * hand-shakes with the mount thread through hfsmp->scan_var.
 */
void hfs_scan_blocks (struct hfsmount *hfsmp) {
	/*
	 * Take the allocation file lock.  Journal transactions will block until
	 * we're done here.
	 */

	int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

	/*
	 * We serialize here with the HFS mount lock as we're mounting.
	 *
	 * The mount can only proceed once this thread has acquired the bitmap
	 * lock, since we absolutely do not want someone else racing in and
	 * getting the bitmap lock, doing a read/write of the bitmap file,
	 * then us getting the bitmap lock.
	 *
	 * To prevent this, the mount thread takes the HFS mount mutex, starts us
	 * up, then immediately msleeps on the scan_var variable in the mount
	 * point as a condition variable.  This serialization is safe since
	 * if we race in and try to proceed while they're still holding the lock,
	 * we'll block trying to acquire the global lock.  Since the mount thread
	 * acquires the HFS mutex before starting this function in a new thread,
	 * any lock acquisition on our part must be linearizably AFTER the mount thread's.
	 *
	 * Note that the HFS mount mutex is always taken last, and always for only
	 * a short time.  In this case, we just take it long enough to mark the
	 * scan-in-flight bit.
	 */
	(void) hfs_lock_mount (hfsmp);
	hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_INFLIGHT;
	/* Wake the mount thread msleeping on scan_var (see comment above). */
	wakeup((caddr_t) &hfsmp->scan_var);
	hfs_unlock_mount (hfsmp);

	/* Initialize the summary table.  Failure is non-fatal: we log and
	   carry on with the unmap scan. */
	if (hfs_init_summary (hfsmp)) {
		printf("hfs: could not initialize summary table for %s\n", hfsmp->vcbVN);
	}

	/*
	 * ScanUnmapBlocks assumes that the bitmap lock is held when you
	 * call the function. We don't care if there were any errors issuing unmaps.
	 *
	 * It will also attempt to build up the summary table for subsequent
	 * allocator use, as configured.
	 */
	(void) ScanUnmapBlocks(hfsmp);

	/* Flip scan_var from in-flight to completed and wake any waiter. */
	(void) hfs_lock_mount (hfsmp);
	hfsmp->scan_var &= ~HFS_ALLOCATOR_SCAN_INFLIGHT;
	hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_COMPLETED;
	wakeup((caddr_t) &hfsmp->scan_var);
	hfs_unlock_mount (hfsmp);

	/* Drop any buffers cached against the allocation file during the
	   scan — presumably so later bitmap I/O re-reads fresh blocks;
	   NOTE(review): confirm the exact motivation. */
	buf_invalidateblks(hfsmp->hfs_allocation_vp, 0, 0, 0);

	hfs_systemfile_unlock(hfsmp, flags);

}
1131
/* Set during hfs_mountfs() for the root volume: non-zero when the volume
   header's kHFSVolumeUnmountedMask attribute bit was set, i.e. the root
   filesystem was cleanly unmounted before this boot.  Exported read-only
   as sysctl vfs.generic.root_unmounted_cleanly. */
static int hfs_root_unmounted_cleanly = 0;

SYSCTL_DECL(_vfs_generic);
HFS_SYSCTL(INT, _vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &hfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
1136
1137 /*
1138 * Common code for mount and mountroot
1139 */
1140 int
1141 hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
1142 int journal_replay_only, vfs_context_t context)
1143 {
1144 struct proc *p = vfs_context_proc(context);
1145 int retval = E_NONE;
1146 struct hfsmount *hfsmp = NULL;
1147 struct buf *bp;
1148 dev_t dev;
1149 HFSMasterDirectoryBlock *mdbp = NULL;
1150 int ronly;
1151 #if QUOTA
1152 int i;
1153 #endif
1154 int mntwrapper;
1155 kauth_cred_t cred;
1156 u_int64_t disksize;
1157 daddr64_t log_blkcnt;
1158 u_int32_t log_blksize;
1159 u_int32_t phys_blksize;
1160 u_int32_t minblksize;
1161 u_int32_t iswritable;
1162 daddr64_t mdb_offset;
1163 int isvirtual = 0;
1164 int isroot = !journal_replay_only && args == NULL;
1165 u_int32_t device_features = 0;
1166 int isssd;
1167
1168 ronly = mp && vfs_isrdonly(mp);
1169 dev = vnode_specrdev(devvp);
1170 cred = p ? vfs_context_ucred(context) : NOCRED;
1171 mntwrapper = 0;
1172
1173 bp = NULL;
1174 hfsmp = NULL;
1175 mdbp = NULL;
1176 minblksize = kHFSBlockSize;
1177
1178 /* Advisory locking should be handled at the VFS layer */
1179 if (mp)
1180 vfs_setlocklocal(mp);
1181
1182 /* Get the logical block size (treated as physical block size everywhere) */
1183 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
1184 if (HFS_MOUNT_DEBUG) {
1185 printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n");
1186 }
1187 retval = ENXIO;
1188 goto error_exit;
1189 }
1190 if (log_blksize == 0 || log_blksize > 1024*1024*1024) {
1191 printf("hfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize);
1192 retval = ENXIO;
1193 goto error_exit;
1194 }
1195
1196 /* Get the physical block size. */
1197 retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
1198 if (retval) {
1199 if ((retval != ENOTSUP) && (retval != ENOTTY)) {
1200 if (HFS_MOUNT_DEBUG) {
1201 printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n");
1202 }
1203 retval = ENXIO;
1204 goto error_exit;
1205 }
1206 /* If device does not support this ioctl, assume that physical
1207 * block size is same as logical block size
1208 */
1209 phys_blksize = log_blksize;
1210 }
1211 if (phys_blksize == 0 || phys_blksize > MAXBSIZE) {
1212 printf("hfs: physical block size 0x%x looks bad. Not mounting.\n", phys_blksize);
1213 retval = ENXIO;
1214 goto error_exit;
1215 }
1216
1217 /* Switch to 512 byte sectors (temporarily) */
1218 if (log_blksize > 512) {
1219 u_int32_t size512 = 512;
1220
1221 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
1222 if (HFS_MOUNT_DEBUG) {
1223 printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n");
1224 }
1225 retval = ENXIO;
1226 goto error_exit;
1227 }
1228 }
1229 /* Get the number of 512 byte physical blocks. */
1230 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1231 /* resetting block size may fail if getting block count did */
1232 (void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
1233 if (HFS_MOUNT_DEBUG) {
1234 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n");
1235 }
1236 retval = ENXIO;
1237 goto error_exit;
1238 }
1239 /* Compute an accurate disk size (i.e. within 512 bytes) */
1240 disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;
1241
1242 /*
1243 * On Tiger it is not necessary to switch the device
1244 * block size to be 4k if there are more than 31-bits
1245 * worth of blocks but to insure compatibility with
1246 * pre-Tiger systems we have to do it.
1247 *
1248 * If the device size is not a multiple of 4K (8 * 512), then
1249 * switching the logical block size isn't going to help because
1250 * we will be unable to write the alternate volume header.
1251 * In this case, just leave the logical block size unchanged.
1252 */
1253 if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
1254 minblksize = log_blksize = 4096;
1255 if (phys_blksize < log_blksize)
1256 phys_blksize = log_blksize;
1257 }
1258
1259 /*
1260 * The cluster layer is not currently prepared to deal with a logical
1261 * block size larger than the system's page size. (It can handle
1262 * blocks per page, but not multiple pages per block.) So limit the
1263 * logical block size to the page size.
1264 */
1265 if (log_blksize > PAGE_SIZE) {
1266 log_blksize = PAGE_SIZE;
1267 }
1268
1269 /* Now switch to our preferred physical block size. */
1270 if (log_blksize > 512) {
1271 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1272 if (HFS_MOUNT_DEBUG) {
1273 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n");
1274 }
1275 retval = ENXIO;
1276 goto error_exit;
1277 }
1278 /* Get the count of physical blocks. */
1279 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1280 if (HFS_MOUNT_DEBUG) {
1281 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n");
1282 }
1283 retval = ENXIO;
1284 goto error_exit;
1285 }
1286 }
1287 /*
1288 * At this point:
1289 * minblksize is the minimum physical block size
1290 * log_blksize has our preferred physical block size
1291 * log_blkcnt has the total number of physical blocks
1292 */
1293
1294 mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize);
1295 if ((retval = (int)buf_meta_bread(devvp,
1296 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)),
1297 phys_blksize, cred, &bp))) {
1298 if (HFS_MOUNT_DEBUG) {
1299 printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval);
1300 }
1301 goto error_exit;
1302 }
1303 mdbp = hfs_malloc(kMDBSize);
1304 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize);
1305 buf_brelse(bp);
1306 bp = NULL;
1307
1308 hfsmp = hfs_mallocz(sizeof(struct hfsmount));
1309
1310 hfs_chashinit_finish(hfsmp);
1311
1312 /* Init the ID lookup hashtable */
1313 hfs_idhash_init (hfsmp);
1314
1315 /*
1316 * See if the disk supports unmap (trim).
1317 *
1318 * NOTE: vfs_init_io_attributes has not been called yet, so we can't use the io_flags field
1319 * returned by vfs_ioattr. We need to call VNOP_IOCTL ourselves.
1320 */
1321 if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&device_features, 0, context) == 0) {
1322 if (device_features & DK_FEATURE_UNMAP) {
1323 hfsmp->hfs_flags |= HFS_UNMAP;
1324 }
1325
1326 if(device_features & DK_FEATURE_BARRIER)
1327 hfsmp->hfs_flags |= HFS_FEATURE_BARRIER;
1328 }
1329
1330 /*
1331 * See if the disk is a solid state device, too. We need this to decide what to do about
1332 * hotfiles.
1333 */
1334 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, context) == 0) {
1335 if (isssd) {
1336 hfsmp->hfs_flags |= HFS_SSD;
1337 }
1338 }
1339
1340 /* See if the underlying device is Core Storage or not */
1341 dk_corestorage_info_t cs_info;
1342 memset(&cs_info, 0, sizeof(dk_corestorage_info_t));
1343 if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, context) == 0) {
1344 hfsmp->hfs_flags |= HFS_CS;
1345 if (isroot && (cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
1346 hfsmp->hfs_flags |= HFS_CS_METADATA_PIN;
1347 }
1348 if (isroot && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES)) {
1349 hfsmp->hfs_flags |= HFS_CS_HOTFILE_PIN;
1350 hfsmp->hfs_cs_hotfile_size = cs_info.hotfile_size;
1351 }
1352 if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_SWAPFILE)) {
1353 hfsmp->hfs_flags |= HFS_CS_SWAPFILE_PIN;
1354
1355 struct vfsioattr ioattr;
1356 vfs_ioattr(mp, &ioattr);
1357 ioattr.io_flags |= VFS_IOATTR_FLAGS_SWAPPIN_SUPPORTED;
1358 ioattr.io_max_swappin_available = cs_info.swapfile_pinning;
1359 vfs_setioattr(mp, &ioattr);
1360 }
1361 }
1362
1363 /*
1364 * Init the volume information structure
1365 */
1366
1367 lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr);
1368 lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr);
1369 lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr);
1370 lck_spin_init(&hfsmp->vcbFreeExtLock, hfs_spinlock_group, hfs_lock_attr);
1371
1372 if (mp)
1373 vfs_setfsprivate(mp, hfsmp);
1374 hfsmp->hfs_mp = mp; /* Make VFSTOHFS work */
1375 hfsmp->hfs_raw_dev = vnode_specrdev(devvp);
1376 hfsmp->hfs_devvp = devvp;
1377 vnode_ref(devvp); /* Hold a ref on the device, dropped when hfsmp is freed. */
1378 hfsmp->hfs_logical_block_size = log_blksize;
1379 hfsmp->hfs_logical_block_count = log_blkcnt;
1380 hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1381 hfsmp->hfs_physical_block_size = phys_blksize;
1382 hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize);
1383 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1384 if (ronly)
1385 hfsmp->hfs_flags |= HFS_READ_ONLY;
1386 if (mp && ((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS)
1387 hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
1388
1389 #if QUOTA
1390 for (i = 0; i < MAXQUOTAS; i++)
1391 dqfileinit(&hfsmp->hfs_qfiles[i]);
1392 #endif
1393
1394 if (args) {
1395 hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid;
1396 if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID;
1397 hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? UNKNOWNGID : args->hfs_gid;
1398 if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID;
1399 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
1400 if (args->hfs_mask != (mode_t)VNOVAL) {
1401 hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
1402 if (args->flags & HFSFSMNT_NOXONFILES) {
1403 hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
1404 } else {
1405 hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
1406 }
1407 } else {
1408 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1409 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1410 }
1411 if ((args->flags != (int)VNOVAL) && (args->flags & HFSFSMNT_WRAPPER))
1412 mntwrapper = 1;
1413 } else {
1414 /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1415 if (mp && ((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) {
1416 hfsmp->hfs_uid = UNKNOWNUID;
1417 hfsmp->hfs_gid = UNKNOWNGID;
1418 vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */
1419 hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */
1420 hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
1421 }
1422 }
1423
1424 /* Find out if disk media is writable. */
1425 if (VNOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, context) == 0) {
1426 if (iswritable)
1427 hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1428 else
1429 hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1430 }
1431
1432 // Reservations
1433 rl_init(&hfsmp->hfs_reserved_ranges[0]);
1434 rl_init(&hfsmp->hfs_reserved_ranges[1]);
1435
1436 // record the current time at which we're mounting this volume
1437 struct timeval tv;
1438 microtime(&tv);
1439 hfsmp->hfs_mount_time = tv.tv_sec;
1440
1441 /* Mount a standard HFS disk */
1442 if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) &&
1443 (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) {
1444 #if CONFIG_HFS_STD
1445 /* If only journal replay is requested, exit immediately */
1446 if (journal_replay_only) {
1447 retval = 0;
1448 goto error_exit;
1449 }
1450
1451 /* On 10.6 and beyond, non read-only mounts for HFS standard vols get rejected */
1452 if (vfs_isrdwr(mp)) {
1453 retval = EROFS;
1454 goto error_exit;
1455 }
1456
1457 printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n");
1458
1459 /* Treat it as if it's read-only and not writeable */
1460 hfsmp->hfs_flags |= HFS_READ_ONLY;
1461 hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1462
1463 if ((vfs_flags(mp) & MNT_ROOTFS)) {
1464 retval = EINVAL; /* Cannot root from HFS standard disks */
1465 goto error_exit;
1466 }
1467 /* HFS disks can only use 512 byte physical blocks */
1468 if (log_blksize > kHFSBlockSize) {
1469 log_blksize = kHFSBlockSize;
1470 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1471 retval = ENXIO;
1472 goto error_exit;
1473 }
1474 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1475 retval = ENXIO;
1476 goto error_exit;
1477 }
1478 hfsmp->hfs_logical_block_size = log_blksize;
1479 hfsmp->hfs_logical_block_count = log_blkcnt;
1480 hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1481 hfsmp->hfs_physical_block_size = log_blksize;
1482 hfsmp->hfs_log_per_phys = 1;
1483 }
1484 if (args) {
1485 hfsmp->hfs_encoding = args->hfs_encoding;
1486 HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding;
1487
1488 /* establish the timezone */
1489 gTimeZone = args->hfs_timezone;
1490 }
1491
1492 retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode,
1493 &hfsmp->hfs_get_hfsname);
1494 if (retval)
1495 goto error_exit;
1496
1497 retval = hfs_MountHFSVolume(hfsmp, mdbp, p);
1498 if (retval)
1499 (void) hfs_relconverter(hfsmp->hfs_encoding);
1500 #else
1501 /* On platforms where HFS Standard is not supported, deny the mount altogether */
1502 retval = EINVAL;
1503 goto error_exit;
1504 #endif
1505
1506 }
1507 else { /* Mount an HFS Plus disk */
1508 HFSPlusVolumeHeader *vhp;
1509 off_t embeddedOffset;
1510 int jnl_disable = 0;
1511
1512 /* Get the embedded Volume Header */
1513 if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
1514 embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize;
1515 embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) *
1516 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1517
1518 /*
1519 * Cooperative Fusion is not allowed on embedded HFS+
1520 * filesystems (HFS+ inside HFS standard wrapper)
1521 */
1522 hfsmp->hfs_flags &= ~HFS_CS_METADATA_PIN;
1523
1524 /*
1525 * If the embedded volume doesn't start on a block
1526 * boundary, then switch the device to a 512-byte
1527 * block size so everything will line up on a block
1528 * boundary.
1529 */
1530 if ((embeddedOffset % log_blksize) != 0) {
1531 printf("hfs_mountfs: embedded volume offset not"
1532 " a multiple of physical block size (%d);"
1533 " switching to 512\n", log_blksize);
1534 log_blksize = 512;
1535 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE,
1536 (caddr_t)&log_blksize, FWRITE, context)) {
1537
1538 if (HFS_MOUNT_DEBUG) {
1539 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n");
1540 }
1541 retval = ENXIO;
1542 goto error_exit;
1543 }
1544 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT,
1545 (caddr_t)&log_blkcnt, 0, context)) {
1546 if (HFS_MOUNT_DEBUG) {
1547 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n");
1548 }
1549 retval = ENXIO;
1550 goto error_exit;
1551 }
1552 /* Note: relative block count adjustment */
1553 hfsmp->hfs_logical_block_count *=
1554 hfsmp->hfs_logical_block_size / log_blksize;
1555
1556 /* Update logical /physical block size */
1557 hfsmp->hfs_logical_block_size = log_blksize;
1558 hfsmp->hfs_physical_block_size = log_blksize;
1559
1560 phys_blksize = log_blksize;
1561 hfsmp->hfs_log_per_phys = 1;
1562 }
1563
1564 disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) *
1565 (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1566
1567 hfsmp->hfs_logical_block_count = disksize / log_blksize;
1568
1569 hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1570
1571 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1572
1573 if (bp) {
1574 buf_markinvalid(bp);
1575 buf_brelse(bp);
1576 bp = NULL;
1577 }
1578 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1579 phys_blksize, cred, &bp);
1580 if (retval) {
1581 if (HFS_MOUNT_DEBUG) {
1582 printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval);
1583 }
1584 goto error_exit;
1585 }
1586 bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512);
1587 buf_brelse(bp);
1588 bp = NULL;
1589 vhp = (HFSPlusVolumeHeader*) mdbp;
1590
1591 }
1592 else { /* pure HFS+ */
1593 embeddedOffset = 0;
1594 vhp = (HFSPlusVolumeHeader*) mdbp;
1595 }
1596
1597 retval = hfs_ValidateHFSPlusVolumeHeader(hfsmp, vhp);
1598 if (retval)
1599 goto error_exit;
1600
1601 /*
1602 * If allocation block size is less than the physical block size,
1603 * invalidate the buffer read in using native physical block size
1604 * to ensure data consistency.
1605 *
1606 * HFS Plus reserves one allocation block for the Volume Header.
1607 * If the physical size is larger, then when we read the volume header,
1608 * we will also end up reading in the next allocation block(s).
1609 * If those other allocation block(s) is/are modified, and then the volume
1610 * header is modified, the write of the volume header's buffer will write
1611 * out the old contents of the other allocation blocks.
1612 *
1613 * We assume that the physical block size is same as logical block size.
1614 * The physical block size value is used to round down the offsets for
1615 * reading and writing the primary and alternate volume headers.
1616 *
1617 * The same logic is also in hfs_MountHFSPlusVolume to ensure that
1618 * hfs_mountfs, hfs_MountHFSPlusVolume and later are doing the I/Os
1619 * using same block size.
1620 */
1621 if (SWAP_BE32(vhp->blockSize) < hfsmp->hfs_physical_block_size) {
1622 phys_blksize = hfsmp->hfs_logical_block_size;
1623 hfsmp->hfs_physical_block_size = hfsmp->hfs_logical_block_size;
1624 hfsmp->hfs_log_per_phys = 1;
1625 // There should be one bp associated with devvp in buffer cache.
1626 retval = buf_invalidateblks(devvp, 0, 0, 0);
1627 if (retval)
1628 goto error_exit;
1629 }
1630
1631 if (isroot) {
1632 hfs_root_unmounted_cleanly = ((SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) != 0);
1633 }
1634
1635 /*
1636 * On inconsistent disks, do not allow read-write mount
1637 * unless it is the boot volume being mounted. We also
1638 * always want to replay the journal if the journal_replay_only
1639 * flag is set because that will (most likely) get the
1640 * disk into a consistent state before fsck_hfs starts
1641 * looking at it.
1642 */
1643 if (!journal_replay_only
1644 && !(vfs_flags(mp) & MNT_ROOTFS)
1645 && (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask)
1646 && !(hfsmp->hfs_flags & HFS_READ_ONLY)) {
1647
1648 if (HFS_MOUNT_DEBUG) {
1649 printf("hfs_mountfs: failed to mount non-root inconsistent disk\n");
1650 }
1651 retval = EINVAL;
1652 goto error_exit;
1653 }
1654
1655
1656 // XXXdbg
1657 //
1658 hfsmp->jnl = NULL;
1659 hfsmp->jvp = NULL;
1660 if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) &&
1661 args->journal_disable) {
1662 jnl_disable = 1;
1663 }
1664
1665 //
1666 // We only initialize the journal here if the last person
1667 // to mount this volume was journaling aware. Otherwise
1668 // we delay journal initialization until later at the end
1669 // of hfs_MountHFSPlusVolume() because the last person who
1670 // mounted it could have messed things up behind our back
1671 // (so we need to go find the .journal file, make sure it's
1672 // the right size, re-sync up if it was moved, etc).
1673 //
1674 if ( (SWAP_BE32(vhp->lastMountedVersion) == kHFSJMountVersion)
1675 && (SWAP_BE32(vhp->attributes) & kHFSVolumeJournaledMask)
1676 && !jnl_disable) {
1677
1678 // if we're able to init the journal, mark the mount
1679 // point as journaled.
1680 //
1681 if ((retval = hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred)) == 0) {
1682 if (mp)
1683 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1684 } else {
1685 if (retval == EROFS) {
1686 // EROFS is a special error code that means the volume has an external
1687 // journal which we couldn't find. in that case we do not want to
1688 // rewrite the volume header - we'll just refuse to mount the volume.
1689 if (HFS_MOUNT_DEBUG) {
1690 printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n");
1691 }
1692 retval = EINVAL;
1693 goto error_exit;
1694 }
1695
1696 // if the journal failed to open, then set the lastMountedVersion
1697 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1698 // of just bailing out because the volume is journaled.
1699 if (!ronly) {
1700 if (HFS_MOUNT_DEBUG) {
1701 printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n");
1702 }
1703
1704 HFSPlusVolumeHeader *jvhp;
1705
1706 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1707
1708 if (mdb_offset == 0) {
1709 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1710 }
1711
1712 bp = NULL;
1713 retval = (int)buf_meta_bread(devvp,
1714 HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1715 phys_blksize, cred, &bp);
1716 if (retval == 0) {
1717 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1718
1719 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1720 printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n");
1721 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1722 buf_bwrite(bp);
1723 } else {
1724 buf_brelse(bp);
1725 }
1726 bp = NULL;
1727 } else if (bp) {
1728 buf_brelse(bp);
1729 // clear this so the error exit path won't try to use it
1730 bp = NULL;
1731 }
1732 }
1733
1734 // if this isn't the root device just bail out.
1735 // If it is the root device we just continue on
1736 // in the hopes that fsck_hfs will be able to
1737 // fix any damage that exists on the volume.
1738 if (mp && !(vfs_flags(mp) & MNT_ROOTFS)) {
1739 if (HFS_MOUNT_DEBUG) {
1740 printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n");
1741 }
1742 retval = EINVAL;
1743 goto error_exit;
1744 }
1745 }
1746 }
1747
1748 /* Either the journal is replayed successfully, or there
1749 * was nothing to replay, or no journal exists. In any case,
1750 * return success.
1751 */
1752 if (journal_replay_only) {
1753 retval = 0;
1754 goto error_exit;
1755 }
1756
1757 #if CONFIG_HFS_STD
1758 (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
1759 #endif
1760
1761 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1762 /*
1763 * If the backend didn't like our physical blocksize
1764 * then retry with physical blocksize of 512.
1765 */
1766 if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) {
1767 printf("hfs_mountfs: could not use physical block size "
1768 "(%d) switching to 512\n", log_blksize);
1769 log_blksize = 512;
1770 if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1771 if (HFS_MOUNT_DEBUG) {
1772 printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n");
1773 }
1774 retval = ENXIO;
1775 goto error_exit;
1776 }
1777 if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1778 if (HFS_MOUNT_DEBUG) {
1779 printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n");
1780 }
1781 retval = ENXIO;
1782 goto error_exit;
1783 }
1784 set_fsblocksize(devvp);
1785 /* Note: relative block count adjustment (in case this is an embedded volume). */
1786 hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize;
1787 hfsmp->hfs_logical_block_size = log_blksize;
1788 hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize;
1789
1790 hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1791
1792 if (hfsmp->jnl && hfsmp->jvp == devvp) {
1793 // close and re-open this with the new block size
1794 journal_close(hfsmp->jnl);
1795 hfsmp->jnl = NULL;
1796 if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1797 vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1798 } else {
1799 // if the journal failed to open, then set the lastMountedVersion
1800 // to be "FSK!" which fsck_hfs will see and force the fsck instead
1801 // of just bailing out because the volume is journaled.
1802 if (!ronly) {
1803 if (HFS_MOUNT_DEBUG) {
1804 printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n");
1805 }
1806 HFSPlusVolumeHeader *jvhp;
1807
1808 hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1809
1810 if (mdb_offset == 0) {
1811 mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1812 }
1813
1814 bp = NULL;
1815 retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1816 phys_blksize, cred, &bp);
1817 if (retval == 0) {
1818 jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1819
1820 if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1821 printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n");
1822 jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1823 buf_bwrite(bp);
1824 } else {
1825 buf_brelse(bp);
1826 }
1827 bp = NULL;
1828 } else if (bp) {
1829 buf_brelse(bp);
1830 // clear this so the error exit path won't try to use it
1831 bp = NULL;
1832 }
1833 }
1834
1835 // if this isn't the root device just bail out.
1836 // If it is the root device we just continue on
1837 // in the hopes that fsck_hfs will be able to
1838 // fix any damage that exists on the volume.
1839 if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1840 if (HFS_MOUNT_DEBUG) {
1841 printf("hfs_mountfs: hfs_early_journal_init (2) failed \n");
1842 }
1843 retval = EINVAL;
1844 goto error_exit;
1845 }
1846 }
1847 }
1848
1849 /* Try again with a smaller block size... */
1850 retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1851 if (retval && HFS_MOUNT_DEBUG) {
1852 printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval);
1853 }
1854 }
1855 #if CONFIG_HFS_STD
1856 if (retval)
1857 (void) hfs_relconverter(0);
1858 #endif
1859 }
1860
1861 // save off a snapshot of the mtime from the previous mount
1862 // (for matador).
1863 hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime;
1864
1865 if ( retval ) {
1866 if (HFS_MOUNT_DEBUG) {
1867 printf("hfs_mountfs: encountered failure %d \n", retval);
1868 }
1869 goto error_exit;
1870 }
1871
1872 struct vfsstatfs *vsfs = vfs_statfs(mp);
1873 vsfs->f_fsid.val[0] = dev;
1874 vsfs->f_fsid.val[1] = vfs_typenum(mp);
1875
1876 vfs_setmaxsymlen(mp, 0);
1877
1878 #if CONFIG_HFS_STD
1879 if (ISSET(hfsmp->hfs_flags, HFS_STANDARD)) {
1880 /* HFS standard doesn't support extended readdir! */
1881 mount_set_noreaddirext (mp);
1882 }
1883 #endif
1884
1885 if (args) {
1886 /*
1887 * Set the free space warning levels for a non-root volume:
1888 *
1889 * Set the "danger" limit to 1% of the volume size or 100MB, whichever
1890 * is less. Set the "warning" limit to 2% of the volume size or 150MB,
1891 * whichever is less. And last, set the "desired" freespace level to
1892 * to 3% of the volume size or 200MB, whichever is less.
1893 */
1894 hfsmp->hfs_freespace_notify_dangerlimit =
1895 MIN(HFS_VERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1896 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_VERYLOWDISKTRIGGERFRACTION);
1897 hfsmp->hfs_freespace_notify_warninglimit =
1898 MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1899 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION);
1900 hfsmp->hfs_freespace_notify_desiredlevel =
1901 MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1902 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION);
1903 } else {
1904 /*
1905 * Set the free space warning levels for the root volume:
1906 *
1907 * Set the "danger" limit to 5% of the volume size or 512MB, whichever
1908 * is less. Set the "warning" limit to 10% of the volume size or 1GB,
1909 * whichever is less. And last, set the "desired" freespace level to
1910 * to 11% of the volume size or 1.25GB, whichever is less.
1911 *
1912 * NOTE: While those are the default limits, KernelEventAgent (as of 3/2016)
1913 * will unilaterally override these to the following on OSX only:
1914 * Danger: 3GB
1915 * Warning: Min (2% of root volume, 10GB), with a floor of 10GB
1916 * Desired: Warning Threshold + 1.5GB
1917 */
1918 hfsmp->hfs_freespace_notify_dangerlimit =
1919 MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1920 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION);
1921 hfsmp->hfs_freespace_notify_warninglimit =
1922 MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1923 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION);
1924 hfsmp->hfs_freespace_notify_desiredlevel =
1925 MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1926 (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION);
1927 };
1928
1929 /* Check if the file system exists on virtual device, like disk image */
1930 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) {
1931 if (isvirtual) {
1932 hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE;
1933 }
1934 }
1935
1936 if (!isroot
1937 && !ISSET(hfsmp->hfs_flags, HFS_VIRTUAL_DEVICE)
1938 && hfs_is_ejectable(vfs_statfs(mp)->f_mntfromname)) {
1939 SET(hfsmp->hfs_flags, HFS_RUN_SYNCER);
1940 }
1941
1942 const char *dev_name = (hfsmp->hfs_devvp
1943 ? vnode_getname_printable(hfsmp->hfs_devvp) : NULL);
1944
1945 printf("hfs: mounted %s on device %s\n",
1946 (hfsmp->vcbVN[0] ? (const char*) hfsmp->vcbVN : "unknown"),
1947 dev_name ?: "unknown device");
1948
1949 if (dev_name)
1950 vnode_putname_printable(dev_name);
1951
1952 /*
1953 * Start looking for free space to drop below this level and generate a
1954 * warning immediately if needed:
1955 */
1956 hfsmp->hfs_notification_conditions = 0;
1957 hfs_generate_volume_notifications(hfsmp);
1958
1959 if (ronly == 0) {
1960 (void) hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
1961 }
1962 hfs_free(mdbp, kMDBSize);
1963 return (0);
1964
1965 error_exit:
1966 if (bp)
1967 buf_brelse(bp);
1968
1969 hfs_free(mdbp, kMDBSize);
1970
1971 hfs_close_jvp(hfsmp);
1972
1973 if (hfsmp) {
1974 if (hfsmp->hfs_devvp) {
1975 vnode_rele(hfsmp->hfs_devvp);
1976 }
1977 hfs_locks_destroy(hfsmp);
1978 hfs_delete_chash(hfsmp);
1979 hfs_idhash_destroy (hfsmp);
1980
1981 hfs_free(hfsmp, sizeof(*hfsmp));
1982 if (mp)
1983 vfs_setfsprivate(mp, NULL);
1984 }
1985 return (retval);
1986 }
1987
1988
1989 /*
1990 * Make a filesystem operational.
1991 * Nothing to do at the moment.
1992 */
1993 /* ARGSUSED */
static int
hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context)
{
	/* VFS "start" entry point: HFS needs no post-mount activation, so
	 * this is a no-op that always reports success. */
	return (0);
}
1999
2000
2001 /*
2002 * unmount system call
2003 */
int
hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
	struct proc *p = vfs_context_proc(context);
	struct hfsmount *hfsmp = VFSTOHFS(mp);
	int retval = E_NONE;
	int flags;		/* flags handed to hfs_flushfiles() */
	int force;		/* non-zero for MNT_FORCE: ignore flush errors */
	int started_tr = 0;	/* non-zero while a journal transaction is open */

	flags = 0;
	force = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		force = 1;
	}

	/* Log the unmount with volume name and device name for triage. */
	const char *dev_name = (hfsmp->hfs_devvp
	    ? vnode_getname_printable(hfsmp->hfs_devvp) : NULL);

	printf("hfs: unmount initiated on %s on device %s\n",
	       (hfsmp->vcbVN[0] ? (const char*) hfsmp->vcbVN : "unknown"),
	       dev_name ?: "unknown device");

	if (dev_name)
		vnode_putname_printable(dev_name);

	/* Push out (and reclaim) regular file data first; on failure we bail
	 * unless this is a forced unmount. */
	if ((retval = hfs_flushfiles(mp, flags, p)) && !force)
		return (retval);

	if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
		(void) hfs_recording_suspend(hfsmp);

	/* Stop the periodic syncer before we start tearing things down. */
	hfs_syncer_free(hfsmp);

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		if (hfsmp->hfs_summary_table) {
			int err = 0;
			/*
			 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
			 */
			if (hfsmp->hfs_allocation_vp) {
				err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			}
			hfs_free(hfsmp->hfs_summary_table, hfsmp->hfs_summary_bytes);
			hfsmp->hfs_summary_table = NULL;
			hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;

			/* Only unlock if the lock above actually succeeded. */
			if (err == 0 && hfsmp->hfs_allocation_vp){
				hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
			}

		}
	}

	/*
	 * Flush out the b-trees, volume bitmap and Volume Header
	 */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
		retval = hfs_start_transaction(hfsmp);
		if (retval == 0) {
			started_tr = 1;
		} else if (!force) {
			goto err_exit;
		}

		/* Each system file below is fsync'd under its cnode lock;
		 * errors abort the unmount unless MNT_FORCE is set. */
		if (hfsmp->hfs_startup_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_startup_vp));
			if (retval && !force)
				goto err_exit;
		}

		if (hfsmp->hfs_attribute_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_attribute_vp));
			if (retval && !force)
				goto err_exit;
		}

		(void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
		if (retval && !force)
			goto err_exit;

		(void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p);
		hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
		if (retval && !force)
			goto err_exit;

		if (hfsmp->hfs_allocation_vp) {
			(void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
			retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p);
			hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
			if (retval && !force)
				goto err_exit;
		}

		/* Hot-file clustering metadata, if present as a system vnode. */
		if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) {
			retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p);
			if (retval && !force)
				goto err_exit;
		}

		/* If runtime corruption was detected, indicate that the volume
		 * was not unmounted cleanly.
		 */
		if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
		} else {
			HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask;
		}

		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
			int i;
			u_int32_t min_start = hfsmp->totalBlocks;

			// set the nextAllocation pointer to the smallest free block number
			// we've seen so on the next mount we won't rescan unnecessarily
			lck_spin_lock(&hfsmp->vcbFreeExtLock);
			for(i=0; i < (int)hfsmp->vcbFreeExtCnt; i++) {
				if (hfsmp->vcbFreeExt[i].startBlock < min_start) {
					min_start = hfsmp->vcbFreeExt[i].startBlock;
				}
			}
			lck_spin_unlock(&hfsmp->vcbFreeExtLock);
			if (min_start < hfsmp->nextAllocation) {
				hfsmp->nextAllocation = min_start;
			}
		}

		retval = hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
		if (retval) {
			/* Flush failed: clear the "clean" bit we may have set above. */
			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
			if (!force)
				goto err_exit;	/* could not flush everything */
		}

		if (started_tr) {
			hfs_end_transaction(hfsmp);
			started_tr = 0;
		}
	}

	if (hfsmp->jnl) {
		hfs_flush(hfsmp, HFS_FLUSH_FULL);
	}

	/*
	 * Invalidate our caches and release metadata vnodes
	 */
	(void) hfsUnmount(hfsmp, p);

#if CONFIG_HFS_STD
	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
		(void) hfs_relconverter(hfsmp->hfs_encoding);
	}
#endif

	// XXXdbg
	if (hfsmp->jnl) {
		journal_close(hfsmp->jnl);
		hfsmp->jnl = NULL;
	}

	VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);

	hfs_close_jvp(hfsmp);

	/*
	 * Last chance to dump unreferenced system files.
	 */
	(void) vflush(mp, NULLVP, FORCECLOSE);

#if HFS_SPARSE_DEV
	/* Drop our reference on the backing fs (if any). */
	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingvp) {
		struct vnode * tmpvp;

		hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
		tmpvp = hfsmp->hfs_backingvp;
		hfsmp->hfs_backingvp = NULLVP;
		vnode_rele(tmpvp);
	}
#endif /* HFS_SPARSE_DEV */

	vnode_rele(hfsmp->hfs_devvp);

	/* Tear down locks/hashes, then free the mount structure itself. */
	hfs_locks_destroy(hfsmp);
	hfs_delete_chash(hfsmp);
	hfs_idhash_destroy(hfsmp);

	hfs_assert(TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS])
			   && TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_LOCKED_BLOCKS]));
	hfs_assert(!hfsmp->lockedBlocks);

	hfs_free(hfsmp, sizeof(*hfsmp));

#if HFS_LEAK_DEBUG
	/* On the last unmount, check for leaked allocations (debug builds). */
	if (OSDecrementAtomic(&hfs_active_mounts) == 1) {
		if (hfs_dump_allocations())
			Debugger(NULL);
		else {
			printf("hfs: last unmount and nothing was leaked!\n");
			msleep(hfs_unmount, NULL, PINOD, "hfs_unmount",
				   &(struct timespec){ 5, 0 });
		}
	}
#endif

	return (0);

  err_exit:
	if (started_tr) {
		hfs_end_transaction(hfsmp);
	}
	return retval;
}
2226
2227
2228 /*
2229 * Return the root of a filesystem.
2230 */
/* Return the root directory vnode of the given HFS mount (VFS_ROOT). */
int hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context)
{
	/* kHFSRootFolderID is the fixed CNID of the volume root folder. */
	return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0);
}
2235
2236
2237 /*
2238 * Do operations associated with quotas
2239 */
#if !QUOTA
/* Quota support compiled out: every quotactl request is unsupported. */
static int
hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context)
{
	return (ENOTSUP);
}
#else
/*
 * Dispatch a quotactl(2) request for this mount.
 *
 * The command word packs the sub-command in the high bits (SUBCMDSHIFT)
 * and the quota type (user/group) in the low bits (SUBCMDMASK).
 * Privilege: Q_SYNC and Q_QUOTASTAT are unprivileged; Q_GETQUOTA is
 * allowed for one's own uid; everything else requires superuser.
 */
static int
hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context)
{
	struct proc *p = vfs_context_proc(context);
	int cmd, type, error;

	/* ~0U means "the calling user's own uid". */
	if (uid == ~0U)
		uid = kauth_cred_getuid(vfs_context_ucred(context));
	cmd = cmds >> SUBCMDSHIFT;

	switch (cmd) {
	case Q_SYNC:
	case Q_QUOTASTAT:
		break;
	case Q_GETQUOTA:
		if (uid == kauth_cred_getuid(vfs_context_ucred(context)))
			break;
		/* fall through */
	default:
		if ( (error = vfs_context_suser(context)) )
			return (error);
	}

	type = cmds & SUBCMDMASK;
	if ((u_int)type >= MAXQUOTAS)
		return (EINVAL);
	/* If the mount is busy (e.g. unmounting), quietly do nothing. */
	if (vfs_busy(mp, LK_NOWAIT))
		return (0);

	switch (cmd) {

	case Q_QUOTAON:
		error = hfs_quotaon(p, mp, type, datap);
		break;

	case Q_QUOTAOFF:
		error = hfs_quotaoff(p, mp, type);
		break;

	case Q_SETQUOTA:
		error = hfs_setquota(mp, uid, type, datap);
		break;

	case Q_SETUSE:
		error = hfs_setuse(mp, uid, type, datap);
		break;

	case Q_GETQUOTA:
		error = hfs_getquota(mp, uid, type, datap);
		break;

	case Q_SYNC:
		error = hfs_qsync(mp);
		break;

	case Q_QUOTASTAT:
		error = hfs_quotastat(mp, type, datap);
		break;

	default:
		error = EINVAL;
		break;
	}
	vfs_unbusy(mp);

	return (error);
}
#endif /* QUOTA */
2315
2316 /* Subtype is composite of bits */
2317 #define HFS_SUBTYPE_JOURNALED 0x01
2318 #define HFS_SUBTYPE_CASESENSITIVE 0x02
2319 /* bits 2 - 6 reserved */
2320 #define HFS_SUBTYPE_STANDARDHFS 0x80
2321
2322 /*
2323 * Get file system statistics.
2324 */
/*
 * Get file system statistics (VFS_STATFS): fill in block size, block
 * counts, file counts and the HFS flavor subtype for the caller.
 */
int
hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context)
{
	ExtendedVCB *vcb = VFSTOVCB(mp);
	struct hfsmount *hfsmp = VFSTOHFS(mp);
	u_int16_t subtype = 0;

	sbp->f_bsize = (u_int32_t)vcb->blockSize;
	sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
	sbp->f_blocks = (u_int64_t)((u_int32_t)vcb->totalBlocks);
	/* hfs_freeblks(..., 0) = free; (..., 1) = available to non-root. */
	sbp->f_bfree = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 0));
	sbp->f_bavail = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 1));
	sbp->f_files = (u_int64_t)HFS_MAX_FILES;
	sbp->f_ffree = (u_int64_t)hfs_free_cnids(hfsmp);

	/*
	 * Subtypes (flavors) for HFS
	 *   0:	Mac OS Extended
	 *   1:	Mac OS Extended (Journaled)
	 *   2:	Mac OS Extended (Case Sensitive)
	 *   3:	Mac OS Extended (Case Sensitive, Journaled)
	 *   4 - 127:	Reserved
	 * 128:	Mac OS Standard
	 *
	 */
	if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
		/* HFS+ & variants */
		if (hfsmp->jnl) {
			subtype |= HFS_SUBTYPE_JOURNALED;
		}
		if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
			subtype |= HFS_SUBTYPE_CASESENSITIVE;
		}
	}
#if CONFIG_HFS_STD
	else {
		/* HFS standard */
		subtype = HFS_SUBTYPE_STANDARDHFS;
	}
#endif
	sbp->f_fssubtype = subtype;

	return (0);
}
2369
2370
2371 //
2372 // XXXdbg -- this is a callback to be used by the journal to
2373 // get meta data blocks flushed out to disk.
2374 //
2375 // XXXdbg -- be smarter and don't flush *every* block on each
2376 // call. try to only flush some so we don't wind up
2377 // being too synchronous.
2378 //
/*
 * Journal callback: flush dirty volume-header / alternate-volume-header
 * metadata blocks straight to disk. These reads/writes deliberately
 * bypass the journal.
 */
void
hfs_sync_metadata(void *arg)
{
	struct mount *mp = (struct mount *)arg;
	struct hfsmount *hfsmp;
	ExtendedVCB *vcb;
	buf_t bp;
	int retval;
	daddr64_t priIDSector;
	hfsmp = VFSTOHFS(mp);
	vcb = HFSTOVCB(hfsmp);

	// now make sure the super block is flushed
	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
							  HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));

	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
			hfsmp->hfs_physical_block_size, NOCRED, &bp);
	/* ENXIO (device gone) is expected during forced ejects; don't log it. */
	if ((retval != 0 ) && (retval != ENXIO)) {
		printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
		       (int)priIDSector, retval);
	}

	/* Write the buffer only if it is delayed-write and not locked;
	 * otherwise just release it back to the cache. */
	if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
	    buf_bwrite(bp);
	} else if (bp) {
	    buf_brelse(bp);
	}

	/* Note that these I/Os bypass the journal (no calls to journal_start_modify_block) */

	// the alternate super block...
	// XXXdbg - we probably don't need to do this each and every time.
	//          hfs_btreeio.c:FlushAlternate() should flag when it was
	//          written...
	if (hfsmp->hfs_partition_avh_sector) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
				hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
			/*
			 * note this I/O can fail if the partition shrank behind our backs!
			 * So failure should be OK here.
			 */
			buf_bwrite(bp);
		} else if (bp) {
			buf_brelse(bp);
		}
	}

	/* Is the FS's idea of the AVH different than the partition ? */
	if ((hfsmp->hfs_fs_avh_sector) && (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
				hfsmp->hfs_physical_block_size, NOCRED, &bp);
		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
			buf_bwrite(bp);
		} else if (bp) {
			buf_brelse(bp);
		}
	}

}
2443
2444
/* Arguments threaded through vnode_iterate() into hfs_sync_callback(). */
struct hfs_sync_cargs {
	kauth_cred_t cred;		/* credentials of the syncing caller */
	struct proc  *p;		/* calling process (passed to hfs_fsync) */
	int waitfor;			/* MNT_WAIT / MNT_NOWAIT sync mode */
	int error;			/* last fsync error seen by the callback */
	int atime_only_syncs;		/* count of atime-only syncs (capped at 256) */
	time_t sync_start_time;		/* time the sync pass began */
};
2453
2454
2455 static int
2456 hfs_sync_callback(struct vnode *vp, void *cargs)
2457 {
2458 struct cnode *cp = VTOC(vp);
2459 struct hfs_sync_cargs *args;
2460 int error;
2461
2462 args = (struct hfs_sync_cargs *)cargs;
2463
2464 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
2465 return (VNODE_RETURNED);
2466 }
2467
2468 hfs_dirty_t dirty_state = hfs_is_dirty(cp);
2469
2470 bool sync = dirty_state == HFS_DIRTY || vnode_hasdirtyblks(vp);
2471
2472 if (!sync && dirty_state == HFS_DIRTY_ATIME
2473 && args->atime_only_syncs < 256) {
2474 // We only update if the atime changed more than 60s ago
2475 if (args->sync_start_time - cp->c_attr.ca_atime > 60) {
2476 sync = true;
2477 ++args->atime_only_syncs;
2478 }
2479 }
2480
2481 if (sync) {
2482 error = hfs_fsync(vp, args->waitfor, 0, args->p);
2483
2484 if (error)
2485 args->error = error;
2486 } else if (cp->c_touch_acctime)
2487 hfs_touchtimes(VTOHFS(vp), cp);
2488
2489 hfs_unlock(cp);
2490 return (VNODE_RETURNED);
2491 }
2492
2493
2494
2495 /*
2496 * Go through the disk queues to initiate sandbagged IO;
2497 * go through the inodes to write those that have been modified;
2498 * initiate the writing of the super block if it has been modified.
2499 *
2500 * Note: we are always called with the filesystem marked `MPBUSY'.
2501 */
2502 int
2503 hfs_sync(struct mount *mp, int waitfor, vfs_context_t context)
2504 {
2505 struct proc *p = vfs_context_proc(context);
2506 struct cnode *cp;
2507 struct hfsmount *hfsmp;
2508 ExtendedVCB *vcb;
2509 struct vnode *meta_vp[4];
2510 int i;
2511 int error, allerror = 0;
2512 struct hfs_sync_cargs args;
2513
2514 hfsmp = VFSTOHFS(mp);
2515
2516 // Back off if hfs_changefs or a freeze is underway
2517 hfs_lock_mount(hfsmp);
2518 if ((hfsmp->hfs_flags & HFS_IN_CHANGEFS)
2519 || hfsmp->hfs_freeze_state != HFS_THAWED) {
2520 hfs_unlock_mount(hfsmp);
2521 return 0;
2522 }
2523
2524 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2525 hfs_unlock_mount(hfsmp);
2526 return (EROFS);
2527 }
2528
2529 ++hfsmp->hfs_syncers;
2530 hfs_unlock_mount(hfsmp);
2531
2532 args.cred = kauth_cred_get();
2533 args.waitfor = waitfor;
2534 args.p = p;
2535 args.error = 0;
2536 args.atime_only_syncs = 0;
2537
2538 struct timeval tv;
2539 microtime(&tv);
2540
2541 args.sync_start_time = tv.tv_sec;
2542
2543 /*
2544 * hfs_sync_callback will be called for each vnode
2545 * hung off of this mount point... the vnode will be
2546 * properly referenced and unreferenced around the callback
2547 */
2548 vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args);
2549
2550 if (args.error)
2551 allerror = args.error;
2552
2553 vcb = HFSTOVCB(hfsmp);
2554
2555 meta_vp[0] = vcb->extentsRefNum;
2556 meta_vp[1] = vcb->catalogRefNum;
2557 meta_vp[2] = vcb->allocationsRefNum; /* This is NULL for standard HFS */
2558 meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */
2559
2560 /* Now sync our three metadata files */
2561 for (i = 0; i < 4; ++i) {
2562 struct vnode *btvp;
2563
2564 btvp = meta_vp[i];;
2565 if ((btvp==0) || (vnode_mount(btvp) != mp))
2566 continue;
2567
2568 /* XXX use hfs_systemfile_lock instead ? */
2569 (void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2570 cp = VTOC(btvp);
2571
2572 if (!hfs_is_dirty(cp) && !vnode_hasdirtyblks(btvp)) {
2573 hfs_unlock(VTOC(btvp));
2574 continue;
2575 }
2576 error = vnode_get(btvp);
2577 if (error) {
2578 hfs_unlock(VTOC(btvp));
2579 continue;
2580 }
2581 if ((error = hfs_fsync(btvp, waitfor, 0, p)))
2582 allerror = error;
2583
2584 hfs_unlock(cp);
2585 vnode_put(btvp);
2586 };
2587
2588
2589 #if CONFIG_HFS_STD
2590 /*
2591 * Force stale file system control information to be flushed.
2592 */
2593 if (vcb->vcbSigWord == kHFSSigWord) {
2594 if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) {
2595 allerror = error;
2596 }
2597 }
2598 #endif
2599
2600 #if QUOTA
2601 hfs_qsync(mp);
2602 #endif /* QUOTA */
2603
2604 hfs_hotfilesync(hfsmp, vfs_context_kernel());
2605
2606 /*
2607 * Write back modified superblock.
2608 */
2609 if (IsVCBDirty(vcb)) {
2610 error = hfs_flushvolumeheader(hfsmp, waitfor == MNT_WAIT ? HFS_FVH_WAIT : 0);
2611 if (error)
2612 allerror = error;
2613 }
2614
2615 if (hfsmp->jnl) {
2616 hfs_flush(hfsmp, HFS_FLUSH_JOURNAL);
2617 }
2618
2619 hfs_lock_mount(hfsmp);
2620 boolean_t wake = (!--hfsmp->hfs_syncers
2621 && hfsmp->hfs_freeze_state == HFS_WANT_TO_FREEZE);
2622 hfs_unlock_mount(hfsmp);
2623 if (wake)
2624 wakeup(&hfsmp->hfs_freeze_state);
2625
2626 return (allerror);
2627 }
2628
2629
2630 /*
2631 * File handle to vnode
2632 *
2633 * Have to be really careful about stale file handles:
2634 * - check that the cnode id is valid
2635 * - call hfs_vget() to get the locked cnode
2636 * - check for an unallocated cnode (i_mode == 0)
2637 * - check that the given client host has export rights and return
2638 * those rights via. exflagsp and credanonp
2639 */
2640 static int
2641 hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context)
2642 {
2643 struct hfsfid *hfsfhp;
2644 struct vnode *nvp;
2645 int result;
2646
2647 *vpp = NULL;
2648 hfsfhp = (struct hfsfid *)fhp;
2649
2650 if (fhlen < (int)sizeof(struct hfsfid))
2651 return (EINVAL);
2652
2653 result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0, 0);
2654 if (result) {
2655 if (result == ENOENT)
2656 result = ESTALE;
2657 return result;
2658 }
2659
2660 /*
2661 * We used to use the create time as the gen id of the file handle,
2662 * but it is not static enough because it can change at any point
2663 * via system calls. We still don't have another volume ID or other
2664 * unique identifier to use for a generation ID across reboots that
2665 * persists until the file is removed. Using only the CNID exposes
2666 * us to the potential wrap-around case, but as of 2/2008, it would take
2667 * over 2 months to wrap around if the machine did nothing but allocate
2668 * CNIDs. Using some kind of wrap counter would only be effective if
2669 * each file had the wrap counter associated with it. For now,
2670 * we use only the CNID to identify the file as it's good enough.
2671 */
2672
2673 *vpp = nvp;
2674
2675 hfs_unlock(VTOC(nvp));
2676 return (0);
2677 }
2678
2679
2680 /*
2681 * Vnode pointer to File handle
2682 */
2683 /* ARGSUSED */
2684 static int
2685 hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context)
2686 {
2687 struct cnode *cp;
2688 struct hfsfid *hfsfhp;
2689
2690 if (ISHFS(VTOVCB(vp)))
2691 return (ENOTSUP); /* hfs standard is not exportable */
2692
2693 if (*fhlenp < (int)sizeof(struct hfsfid))
2694 return (EOVERFLOW);
2695
2696 cp = VTOC(vp);
2697 hfsfhp = (struct hfsfid *)fhp;
2698 /* only the CNID is used to identify the file now */
2699 hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
2700 hfsfhp->hfsfid_gen = htonl(cp->c_fileid);
2701 *fhlenp = sizeof(struct hfsfid);
2702
2703 return (0);
2704 }
2705
2706
2707 /*
2708 * Initialize HFS filesystems, done only once per boot.
2709 *
2710 * HFS is not a kext-based file system. This makes it difficult to find
2711 * out when the last HFS file system was unmounted and call hfs_uninit()
2712 * to deallocate data structures allocated in hfs_init(). Therefore we
2713 * never deallocate memory allocated by lock attribute and group initializations
2714 * in this function.
2715 */
/*
 * Initialize HFS filesystems, done only once per boot.
 *
 * HFS is not a kext-based file system.  This makes it difficult to find
 * out when the last HFS file system was unmounted and call hfs_uninit()
 * to deallocate data structures allocated in hfs_init().  Therefore we
 * never deallocate memory allocated by lock attribute and group initializations
 * in this function.
 */
static int
hfs_init(__unused struct vfsconf *vfsp)
{
	/* Run-once guard: all subsequent calls return immediately. */
	static int done = 0;

	if (done)
		return (0);
	done = 1;
	hfs_chashinit();

	BTReserveSetup();

	/* Global lock groups/attributes shared by all HFS mounts. */
	hfs_lock_attr    = lck_attr_alloc_init();
	hfs_group_attr   = lck_grp_attr_alloc_init();
	hfs_mutex_group  = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
	hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
	hfs_spinlock_group = lck_grp_alloc_init("hfs-spinlock", hfs_group_attr);

#if HFS_COMPRESSION
	decmpfs_init();
#endif

	journal_init();

	return (0);
}
2742
2743
2744 /*
2745 * Destroy all locks, mutexes and spinlocks in hfsmp on unmount or failed mount
2746 */
2747 static void
2748 hfs_locks_destroy(struct hfsmount *hfsmp)
2749 {
2750
2751 lck_mtx_destroy(&hfsmp->hfs_mutex, hfs_mutex_group);
2752 lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group);
2753 lck_rw_destroy(&hfsmp->hfs_global_lock, hfs_rwlock_group);
2754 lck_spin_destroy(&hfsmp->vcbFreeExtLock, hfs_spinlock_group);
2755
2756 return;
2757 }
2758
2759
2760 static int
2761 hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
2762 {
2763 struct hfsmount * hfsmp;
2764 char fstypename[MFSNAMELEN];
2765
2766 if (vp == NULL)
2767 return (EINVAL);
2768
2769 if (!vnode_isvroot(vp))
2770 return (EINVAL);
2771
2772 vnode_vfsname(vp, fstypename);
2773 if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0)
2774 return (EINVAL);
2775
2776 hfsmp = VTOHFS(vp);
2777
2778 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
2779 return (EINVAL);
2780
2781 *hfsmpp = hfsmp;
2782
2783 return (0);
2784 }
2785
2786 // Replace user-space value
2787 static errno_t ureplace(user_addr_t oldp, size_t *oldlenp,
2788 user_addr_t newp, size_t newlen,
2789 void *data, size_t len)
2790 {
2791 errno_t error;
2792 if (!oldlenp)
2793 return EFAULT;
2794 if (oldp && *oldlenp < len)
2795 return ENOMEM;
2796 if (newp && newlen != len)
2797 return EINVAL;
2798 *oldlenp = len;
2799 if (oldp) {
2800 error = copyout(data, oldp, len);
2801 if (error)
2802 return error;
2803 }
2804 return newp ? copyin(newp, data, len) : 0;
2805 }
2806
/* Convenience wrapper around ureplace() for a scalar variable v.
 * NOTE(review): the third length argument is named `newlenp` here but is
 * passed as ureplace()'s by-value `newlen` — callers pass a size_t value,
 * not a pointer; confirm call sites before renaming. */
#define UREPLACE(oldp, oldlenp, newp, newlenp, v) \
	ureplace(oldp, oldlenp, newp, newlenp, &v, sizeof(v))
2809
2810 static hfsmount_t *hfs_mount_from_cwd(vfs_context_t ctx)
2811 {
2812 vnode_t vp = vfs_context_cwd(ctx);
2813
2814 if (!vp)
2815 return NULL;
2816
2817 /*
2818 * We could use vnode_tag, but it is probably more future proof to
2819 * compare fstypename.
2820 */
2821 char fstypename[MFSNAMELEN];
2822 vnode_vfsname(vp, fstypename);
2823
2824 if (strcmp(fstypename, "hfs"))
2825 return NULL;
2826
2827 return VTOHFS(vp);
2828 }
2829
2830 /*
2831 * HFS filesystem related variables.
2832 */
2833 int
2834 hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2835 user_addr_t newp, size_t newlen, vfs_context_t context)
2836 {
2837 #if !TARGET_OS_EMBEDDED
2838 struct proc *p = vfs_context_proc(context);
2839 #endif
2840 int error;
2841 struct hfsmount *hfsmp;
2842
2843 /* all sysctl names at this level are terminal */
2844
2845 #if !TARGET_OS_EMBEDDED
2846 if (name[0] == HFS_ENCODINGBIAS) {
2847 int bias;
2848
2849 bias = hfs_getencodingbias();
2850
2851 error = UREPLACE(oldp, oldlenp, newp, newlen, bias);
2852 if (error || !newp)
2853 return error;
2854
2855 hfs_setencodingbias(bias);
2856
2857 return 0;
2858 } else
2859 #endif
2860 if (name[0] == HFS_EXTEND_FS) {
2861 u_int64_t newsize = 0;
2862 vnode_t vp = vfs_context_cwd(context);
2863
2864 if (newp == USER_ADDR_NULL || vp == NULLVP
2865 || newlen != sizeof(quad_t) || !oldlenp)
2866 return EINVAL;
2867 if ((error = hfs_getmountpoint(vp, &hfsmp)))
2868 return (error);
2869
2870 /* Start with the 'size' set to the current number of bytes in the filesystem */
2871 newsize = ((uint64_t)hfsmp->totalBlocks) * ((uint64_t)hfsmp->blockSize);
2872
2873 error = UREPLACE(oldp, oldlenp, newp, newlen, newsize);
2874 if (error)
2875 return error;
2876
2877 return hfs_extendfs(hfsmp, newsize, context);
2878 } else if (name[0] == HFS_ENABLE_JOURNALING) {
2879 // make the file system journaled...
2880 vnode_t jvp;
2881 ExtendedVCB *vcb;
2882 struct cat_attr jnl_attr;
2883 struct cat_attr jinfo_attr;
2884 struct cat_fork jnl_fork;
2885 struct cat_fork jinfo_fork;
2886 buf_t jib_buf;
2887 uint64_t jib_blkno;
2888 uint32_t tmpblkno;
2889 uint64_t journal_byte_offset;
2890 uint64_t journal_size;
2891 vnode_t jib_vp = NULLVP;
2892 struct JournalInfoBlock local_jib;
2893 int err = 0;
2894 void *jnl = NULL;
2895 int lockflags;
2896
2897 /* Only root can enable journaling */
2898 if (!kauth_cred_issuser(kauth_cred_get())) {
2899 return (EPERM);
2900 }
2901 if (namelen != 4)
2902 return EINVAL;
2903 hfsmp = hfs_mount_from_cwd(context);
2904 if (!hfsmp)
2905 return EINVAL;
2906
2907 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2908 return EROFS;
2909 }
2910 if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
2911 printf("hfs: can't make a plain hfs volume journaled.\n");
2912 return EINVAL;
2913 }
2914
2915 if (hfsmp->jnl) {
2916 printf("hfs: volume %s is already journaled!\n", hfsmp->vcbVN);
2917 return EAGAIN;
2918 }
2919 vcb = HFSTOVCB(hfsmp);
2920
2921 /* Set up local copies of the initialization info */
2922 tmpblkno = (uint32_t) name[1];
2923 jib_blkno = (uint64_t) tmpblkno;
2924 journal_byte_offset = (uint64_t) name[2];
2925 journal_byte_offset *= hfsmp->blockSize;
2926 journal_byte_offset += hfsmp->hfsPlusIOPosOffset;
2927 journal_size = (uint64_t)((unsigned)name[3]);
2928
2929 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);
2930 if (BTHasContiguousNodes(VTOF(vcb->catalogRefNum)) == 0 ||
2931 BTHasContiguousNodes(VTOF(vcb->extentsRefNum)) == 0) {
2932
2933 printf("hfs: volume has a btree w/non-contiguous nodes. can not enable journaling.\n");
2934 hfs_systemfile_unlock(hfsmp, lockflags);
2935 return EINVAL;
2936 }
2937 hfs_systemfile_unlock(hfsmp, lockflags);
2938
2939 // make sure these both exist!
2940 if ( GetFileInfo(vcb, kHFSRootFolderID, ".journal_info_block", &jinfo_attr, &jinfo_fork) == 0
2941 || GetFileInfo(vcb, kHFSRootFolderID, ".journal", &jnl_attr, &jnl_fork) == 0) {
2942
2943 return EINVAL;
2944 }
2945
2946 /*
2947 * At this point, we have a copy of the metadata that lives in the catalog for the
2948 * journal info block. Compare that the journal info block's single extent matches
2949 * that which was passed into this sysctl.
2950 *
2951 * If it is different, deny the journal enable call.
2952 */
2953 if (jinfo_fork.cf_blocks > 1) {
2954 /* too many blocks */
2955 return EINVAL;
2956 }
2957
2958 if (jinfo_fork.cf_extents[0].startBlock != jib_blkno) {
2959 /* Wrong block */
2960 return EINVAL;
2961 }
2962
2963 /*
2964 * We want to immediately purge the vnode for the JIB.
2965 *
2966 * Because it was written to from userland, there's probably
2967 * a vnode somewhere in the vnode cache (possibly with UBC backed blocks).
2968 * So we bring the vnode into core, then immediately do whatever
2969 * we can to flush/vclean it out. This is because those blocks will be
2970 * interpreted as user data, which may be treated separately on some platforms
2971 * than metadata. If the vnode is gone, then there cannot be backing blocks
2972 * in the UBC.
2973 */
2974 if (hfs_vget (hfsmp, jinfo_attr.ca_fileid, &jib_vp, 1, 0)) {
2975 return EINVAL;
2976 }
2977 /*
2978 * Now we have a vnode for the JIB. recycle it. Because we hold an iocount
2979 * on the vnode, we'll just mark it for termination when the last iocount
2980 * (hopefully ours), is dropped.
2981 */
2982 vnode_recycle (jib_vp);
2983 err = vnode_put (jib_vp);
2984 if (err) {
2985 return EINVAL;
2986 }
2987
2988 /* Initialize the local copy of the JIB (just like hfs.util) */
2989 memset (&local_jib, 'Z', sizeof(struct JournalInfoBlock));
2990 local_jib.flags = SWAP_BE32(kJIJournalInFSMask);
2991 /* Note that the JIB's offset is in bytes */
2992 local_jib.offset = SWAP_BE64(journal_byte_offset);
2993 local_jib.size = SWAP_BE64(journal_size);
2994
2995 /*
2996 * Now write out the local JIB. This essentially overwrites the userland
2997 * copy of the JIB. Read it as BLK_META to treat it as a metadata read/write.
2998 */
2999 jib_buf = buf_getblk (hfsmp->hfs_devvp,
3000 jib_blkno * (hfsmp->blockSize / hfsmp->hfs_logical_block_size),
3001 hfsmp->blockSize, 0, 0, BLK_META);
3002 char* buf_ptr = (char*) buf_dataptr (jib_buf);
3003
3004 /* Zero out the portion of the block that won't contain JIB data */
3005 memset (buf_ptr, 0, hfsmp->blockSize);
3006
3007 bcopy(&local_jib, buf_ptr, sizeof(local_jib));
3008 if (buf_bwrite (jib_buf)) {
3009 return EIO;
3010 }
3011
3012 /* Force a flush track cache */
3013 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
3014
3015 /* Now proceed with full volume sync */
3016 hfs_sync(hfsmp->hfs_mp, MNT_WAIT, context);
3017
3018 printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
3019 (off_t)name[2], (off_t)name[3]);
3020
3021 //
3022 // XXXdbg - note that currently (Sept, 08) hfs_util does not support
3023 // enabling the journal on a separate device so it is safe
3024 // to just copy hfs_devvp here. If hfs_util gets the ability
3025 // to dynamically enable the journal on a separate device then
3026 // we will have to do the same thing as hfs_early_journal_init()
3027 // to locate and open the journal device.
3028 //
3029 jvp = hfsmp->hfs_devvp;
3030 jnl = journal_create(jvp, journal_byte_offset, journal_size,
3031 hfsmp->hfs_devvp,
3032 hfsmp->hfs_logical_block_size,
3033 0,
3034 0,
3035 hfs_sync_metadata, hfsmp->hfs_mp,
3036 hfsmp->hfs_mp);
3037
3038 /*
3039 * Set up the trim callback function so that we can add
3040 * recently freed extents to the free extent cache once
3041 * the transaction that freed them is written to the
3042 * journal on disk.
3043 */
3044 if (jnl)
3045 journal_trim_set_callback(jnl, hfs_trim_callback, hfsmp);
3046
3047 if (jnl == NULL) {
3048 printf("hfs: FAILED to create the journal!\n");
3049 return EIO;
3050 }
3051
3052 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3053
3054 /*
3055 * Flush all dirty metadata buffers.
3056 */
3057 buf_flushdirtyblks(hfsmp->hfs_devvp, TRUE, 0, "hfs_sysctl");
3058 buf_flushdirtyblks(hfsmp->hfs_extents_vp, TRUE, 0, "hfs_sysctl");
3059 buf_flushdirtyblks(hfsmp->hfs_catalog_vp, TRUE, 0, "hfs_sysctl");
3060 buf_flushdirtyblks(hfsmp->hfs_allocation_vp, TRUE, 0, "hfs_sysctl");
3061 if (hfsmp->hfs_attribute_vp)
3062 buf_flushdirtyblks(hfsmp->hfs_attribute_vp, TRUE, 0, "hfs_sysctl");
3063
3064 HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1];
3065 HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask;
3066 hfsmp->jvp = jvp;
3067 hfsmp->jnl = jnl;
3068
3069 // save this off for the hack-y check in hfs_remove()
3070 hfsmp->jnl_start = (u_int32_t)name[2];
3071 hfsmp->jnl_size = (off_t)((unsigned)name[3]);
3072 hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid;
3073 hfsmp->hfs_jnlfileid = jnl_attr.ca_fileid;
3074
3075 vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3076
3077 hfs_unlock_global (hfsmp);
3078 hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT | HFS_FVH_WRITE_ALT);
3079
3080 {
3081 fsid_t fsid;
3082
3083 fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3084 fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3085 vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3086 }
3087 return 0;
3088 } else if (name[0] == HFS_DISABLE_JOURNALING) {
3089 // clear the journaling bit
3090
3091 /* Only root can disable journaling */
3092 if (!kauth_cred_issuser(kauth_cred_get())) {
3093 return (EPERM);
3094 }
3095
3096 hfsmp = hfs_mount_from_cwd(context);
3097 if (!hfsmp)
3098 return EINVAL;
3099
3100 /*
3101 * Disabling journaling is disallowed on volumes with directory hard links
3102 * because we have not tested the relevant code path.
3103 */
3104 if (hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries != 0){
3105 printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
3106 return EPERM;
3107 }
3108
3109 printf("hfs: disabling journaling for %s\n", hfsmp->vcbVN);
3110
3111 hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3112
3113 // Lights out for you buddy!
3114 journal_close(hfsmp->jnl);
3115 hfsmp->jnl = NULL;
3116
3117 hfs_close_jvp(hfsmp);
3118 vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3119 hfsmp->jnl_start = 0;
3120 hfsmp->hfs_jnlinfoblkid = 0;
3121 hfsmp->hfs_jnlfileid = 0;
3122
3123 HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask;
3124
3125 hfs_unlock_global (hfsmp);
3126
3127 hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT | HFS_FVH_WRITE_ALT);
3128
3129 {
3130 fsid_t fsid;
3131
3132 fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3133 fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3134 vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3135 }
3136 return 0;
3137 } else if (name[0] == VFS_CTL_QUERY) {
3138 #if TARGET_OS_EMBEDDED
3139 return EPERM;
3140 #else
3141 struct sysctl_req *req;
3142 union union_vfsidctl vc;
3143 struct mount *mp;
3144 struct vfsquery vq;
3145
3146 req = CAST_DOWN(struct sysctl_req *, oldp); /* we're new style vfs sysctl. */
3147 if (req == NULL) {
3148 return EFAULT;
3149 }
3150
3151 error = SYSCTL_IN(req, &vc, proc_is64bit(p)? sizeof(vc.vc64):sizeof(vc.vc32));
3152 if (error) return (error);
3153
3154 mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
3155 if (mp == NULL) return (ENOENT);
3156
3157 hfsmp = VFSTOHFS(mp);
3158 bzero(&vq, sizeof(vq));
3159 vq.vq_flags = hfsmp->hfs_notification_conditions;
3160 return SYSCTL_OUT(req, &vq, sizeof(vq));;
3161 #endif
3162 } else if (name[0] == HFS_REPLAY_JOURNAL) {
3163 vnode_t devvp = NULL;
3164 int device_fd;
3165 if (namelen != 2) {
3166 return (EINVAL);
3167 }
3168 device_fd = name[1];
3169 error = file_vnode(device_fd, &devvp);
3170 if (error) {
3171 return error;
3172 }
3173 error = vnode_getwithref(devvp);
3174 if (error) {
3175 file_drop(device_fd);
3176 return error;
3177 }
3178 error = hfs_journal_replay(devvp, context);
3179 file_drop(device_fd);
3180 vnode_put(devvp);
3181 return error;
3182 }
3183 #if DEBUG || !TARGET_OS_EMBEDDED
3184 else if (name[0] == HFS_ENABLE_RESIZE_DEBUG) {
3185 if (!kauth_cred_issuser(kauth_cred_get())) {
3186 return (EPERM);
3187 }
3188
3189 int old = hfs_resize_debug;
3190
3191 int res = UREPLACE(oldp, oldlenp, newp, newlen, hfs_resize_debug);
3192
3193 if (old != hfs_resize_debug) {
3194 printf("hfs: %s resize debug\n",
3195 hfs_resize_debug ? "enabled" : "disabled");
3196 }
3197
3198 return res;
3199 }
3200 #endif
3201
3202 return (ENOTSUP);
3203 }
3204
3205 /*
3206 * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support
3207 * the build_path ioctl. We use it to leverage the code below that updates
3208 * the origin list cache if necessary
3209 */
3210
int
hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
{
	int error;
	int lockflags;
	struct hfsmount *hfsmp;

	hfsmp = VFSTOHFS(mp);

	/*
	 * Look the object up by ID.  skiplock == 1, so the vnode comes back
	 * with an iocount but the cnode unlocked.
	 */
	error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1, 0);
	if (error)
		return error;

	/*
	 * If the look-up was via the object ID (rather than the link ID),
	 * then we make sure there's a parent here.  We can't leave this
	 * until hfs_vnop_getattr because if there's a problem getting the
	 * parent at that point, all the caller will do is call
	 * hfs_vfs_vget again and we'll end up in an infinite loop.
	 */

	cnode_t *cp = VTOC(*vpp);

	/* Only hardlinks looked up by file ID need a link origin fixed up. */
	if (ISSET(cp->c_flag, C_HARDLINK) && ino == cp->c_fileid) {
		hfs_lock_always(cp, HFS_SHARED_LOCK);

		if (!hfs_haslinkorigin(cp)) {
			/*
			 * We need to modify the cnode, so upgrade to exclusive.
			 * If the upgrade fails the lock was dropped, so take
			 * the exclusive lock from scratch.
			 */
			if (!hfs_lock_upgrade(cp))
				hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);

			if (cp->c_cnid == cp->c_fileid) {
				/*
				 * Descriptor is stale, so we need to refresh it.  We
				 * pick the first link.
				 */
				cnid_t link_id;

				error = hfs_first_link(hfsmp, cp, &link_id);

				if (!error) {
					/* Refresh the descriptor from that link's catalog record. */
					lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
					error = cat_findname(hfsmp, link_id, &cp->c_desc);
					hfs_systemfile_unlock(hfsmp, lockflags);
				}
			} else {
				// We'll use whatever link the descriptor happens to have
				error = 0;
			}
			if (!error)
				hfs_savelinkorigin(cp, cp->c_parentcnid);
		}

		hfs_unlock(cp);

		if (error) {
			/* No usable parent: drop the iocount and fail the lookup. */
			vnode_put(*vpp);
			*vpp = NULL;
		}
	}

	return error;
}
3273
3274
3275 /*
3276 * Look up an HFS object by ID.
3277 *
3278 * The object is returned with an iocount reference and the cnode locked.
3279 *
3280 * If the object is a file then it will represent the data fork.
3281 */
int
hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted)
{
	struct vnode *vp = NULLVP;
	struct cat_desc cndesc;
	struct cat_attr cnattr;
	struct cat_fork cnfork;
	u_int32_t linkref = 0;	/* non-zero => cnid named a raw hardlink inode */
	int error;

	/* Check for cnids that shouldn't be exported. */
	if ((cnid < kHFSFirstUserCatalogNodeID) &&
	    (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) {
		return (ENOENT);
	}
	/* Don't export our private directories. */
	if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
	    cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
		return (ENOENT);
	}
	/*
	 * Check the cnode hash first; a hit returns the vnode with an
	 * iocount (and the cnode locked, unless skiplock was set).
	 */
	vp = hfs_chash_getvnode(hfsmp, cnid, 0, skiplock, allow_deleted);
	if (vp) {
		*vpp = vp;
		return(0);
	}

	bzero(&cndesc, sizeof(cndesc));
	bzero(&cnattr, sizeof(cnattr));
	bzero(&cnfork, sizeof(cnfork));

	/*
	 * Not in hash, lookup in catalog
	 */
	if (cnid == kHFSRootParentID) {
		/* The root parent is synthetic; build its descriptor and attrs by hand. */
		static char hfs_rootname[] = "/";

		cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0];
		cndesc.cd_namelen = 1;
		cndesc.cd_parentcnid = kHFSRootParentID;
		cndesc.cd_cnid = kHFSRootFolderID;
		cndesc.cd_flags = CD_ISDIR;

		cnattr.ca_fileid = kHFSRootFolderID;
		cnattr.ca_linkcount = 1;
		cnattr.ca_entries = 1;
		cnattr.ca_dircount = 1;
		cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
	} else {
		int lockflags;
		cnid_t pid;
		const char *nameptr;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
		error = cat_idlookup(hfsmp, cnid, 0, 0, &cndesc, &cnattr, &cnfork);
		hfs_systemfile_unlock(hfsmp, lockflags);

		if (error) {
			*vpp = NULL;
			return (error);
		}

		/*
		 * Check for a raw hardlink inode and save its linkref.
		 *
		 * Raw inodes live in the private metadata directories under names
		 * of the form "<prefix><linkref>", so the link reference can be
		 * parsed out of the catalog name.
		 */
		pid = cndesc.cd_parentcnid;
		nameptr = (const char *)cndesc.cd_nameptr;

		if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
		    cndesc.cd_namelen > HFS_INODE_PREFIX_LEN &&
		    (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) {
			linkref = strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10);

		} else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
		           cndesc.cd_namelen > HFS_DIRINODE_PREFIX_LEN &&
		           (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) {
			linkref = strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10);

		} else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
		           cndesc.cd_namelen > HFS_DELETE_PREFIX_LEN &&
		           (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) {
			*vpp = NULL;
			cat_releasedesc(&cndesc);
			return (ENOENT);	/* open unlinked file */
		}
	}

	/*
	 * Finish initializing cnode descriptor for hardlinks.
	 *
	 * We need a valid name and parent for reverse lookups.
	 */
	if (linkref) {
		cnid_t lastid;
		struct cat_desc linkdesc;
		int linkerr = 0;

		cnattr.ca_linkref = linkref;
		bzero (&linkdesc, sizeof (linkdesc));

		/*
		 * If the caller supplied the raw inode value, then we don't know exactly
		 * which hardlink they wanted. It's likely that they acquired the raw inode
		 * value BEFORE the item became a hardlink, in which case, they probably
		 * want the oldest link. So request the oldest link from the catalog.
		 *
		 * Unfortunately, this requires that we iterate through all N hardlinks. On the plus
		 * side, since we know that we want the last linkID, we can also have this one
		 * call give us back the name of the last ID, since it's going to have it in-hand...
		 */
		linkerr = hfs_lookup_lastlink (hfsmp, linkref, &lastid, &linkdesc);
		if ((linkerr == 0) && (lastid != 0)) {
			/*
			 * Release any lingering buffers attached to our local descriptor.
			 * Then copy the name and other business into the cndesc
			 */
			cat_releasedesc (&cndesc);
			bcopy (&linkdesc, &cndesc, sizeof(linkdesc));
		}
		/* If it failed, the linkref code will just use whatever it had in-hand below. */
	}

	if (linkref) {
		int newvnode_flags = 0;

		/* Raw hardlink inode: create the vnode without a component name. */
		error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr,
		                        &cnfork, &vp, &newvnode_flags);
		if (error == 0) {
			VTOC(vp)->c_flag |= C_HARDLINK;
			vnode_setmultipath(vp);
		}
	} else {
		int newvnode_flags = 0;

		void *buf = hfs_malloc(MAXPATHLEN);

		/* Supply hfs_getnewvnode with a component name. */
		struct componentname cn = {
			.cn_nameiop = LOOKUP,
			.cn_flags = ISLASTCN,
			.cn_pnlen = MAXPATHLEN,
			.cn_namelen = cndesc.cd_namelen,
			.cn_pnbuf = buf,
			.cn_nameptr = buf
		};

		/* +1 to carry the NUL terminator along with the name bytes. */
		bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1);

		error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr,
		                        &cnfork, &vp, &newvnode_flags);

		if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK)) {
			/* Remember which parent directory this link was reached through. */
			hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid);
		}

		hfs_free(buf, MAXPATHLEN);
	}
	cat_releasedesc(&cndesc);

	*vpp = vp;
	if (vp && skiplock) {
		/* Caller asked for the cnode unlocked; the iocount is still held. */
		hfs_unlock(VTOC(vp));
	}
	return (error);
}
3449
3450
3451 /*
3452 * Flush out all the files in a filesystem.
3453 */
static int
#if QUOTA
hfs_flushfiles(struct mount *mp, int flags, struct proc *p)
#else
hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p)
#endif /* QUOTA */
{
	struct hfsmount *hfsmp;
	struct vnode *skipvp = NULLVP;
	int error;
	int accounted_root_usecounts;	/* root-vnode refs held by open quota files */
#if QUOTA
	int i;
#endif

	hfsmp = VFSTOHFS(mp);

	accounted_root_usecounts = 0;
#if QUOTA
	/*
	 * The open quota files have an indirect reference on
	 * the root directory vnode.  We must account for this
	 * extra reference when doing the initial vflush.
	 */
	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
		/* Find out how many quota files we have open. */
		for (i = 0; i < MAXQUOTAS; i++) {
			if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP)
				++accounted_root_usecounts;
		}
	}
#endif /* QUOTA */

	if (accounted_root_usecounts > 0) {
		/* Obtain the root vnode so we can skip over it. */
		skipvp = hfs_chash_getvnode(hfsmp, kHFSRootFolderID, 0, 0, 0);
	}

	/* First pass: also skip swap files (SKIPSWAP). */
	error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags);
	if (error != 0)
		return(error);

	/* Second pass: everything but HFS system files and the skipped root. */
	error = vflush(mp, skipvp, SKIPSYSTEM | flags);

	if (skipvp) {
		/*
		 * See if there are additional references on the
		 * root vp besides the ones obtained from the open
		 * quota files and CoreStorage.
		 */
		if ((error == 0) &&
		    (vnode_isinuse(skipvp, accounted_root_usecounts))) {
			error = EBUSY;  /* root directory is still open */
		}
		hfs_unlock(VTOC(skipvp));
		/* release the iocount from the hfs_chash_getvnode call above. */
		vnode_put(skipvp);
	}
	if (error && (flags & FORCECLOSE) == 0)
		return (error);

#if QUOTA
	/* Close the quota files, dropping their references on the root. */
	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP)
				continue;
			hfs_quotaoff(p, mp, i);
		}
	}
#endif /* QUOTA */

	if (skipvp) {
		/* Final pass now that nothing should be pinning the root vnode. */
		error = vflush(mp, NULLVP, SKIPSYSTEM | flags);
	}

	return (error);
}
3531
3532 /*
3533 * Update volume encoding bitmap (HFS Plus only)
3534 *
3535 * Mark a legacy text encoding as in-use (as needed)
3536 * in the volume header of this HFS+ filesystem.
3537 */
3538 void
3539 hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
3540 {
3541 #define kIndexMacUkrainian 48 /* MacUkrainian encoding is 152 */
3542 #define kIndexMacFarsi 49 /* MacFarsi encoding is 140 */
3543
3544 u_int32_t index;
3545
3546 switch (encoding) {
3547 case kTextEncodingMacUkrainian:
3548 index = kIndexMacUkrainian;
3549 break;
3550 case kTextEncodingMacFarsi:
3551 index = kIndexMacFarsi;
3552 break;
3553 default:
3554 index = encoding;
3555 break;
3556 }
3557
3558 /* Only mark the encoding as in-use if it wasn't already set */
3559 if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
3560 hfs_lock_mount (hfsmp);
3561 hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
3562 MarkVCBDirty(hfsmp);
3563 hfs_unlock_mount(hfsmp);
3564 }
3565 }
3566
3567 /*
3568 * Update volume stats
3569 *
3570 * On journal volumes this will cause a volume header flush
3571 */
3572 int
3573 hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
3574 {
3575 struct timeval tv;
3576
3577 microtime(&tv);
3578
3579 hfs_lock_mount (hfsmp);
3580
3581 MarkVCBDirty(hfsmp);
3582 hfsmp->hfs_mtime = tv.tv_sec;
3583
3584 switch (op) {
3585 case VOL_UPDATE:
3586 break;
3587 case VOL_MKDIR:
3588 if (hfsmp->hfs_dircount != 0xFFFFFFFF)
3589 ++hfsmp->hfs_dircount;
3590 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3591 ++hfsmp->vcbNmRtDirs;
3592 break;
3593 case VOL_RMDIR:
3594 if (hfsmp->hfs_dircount != 0)
3595 --hfsmp->hfs_dircount;
3596 if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3597 --hfsmp->vcbNmRtDirs;
3598 break;
3599 case VOL_MKFILE:
3600 if (hfsmp->hfs_filecount != 0xFFFFFFFF)
3601 ++hfsmp->hfs_filecount;
3602 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3603 ++hfsmp->vcbNmFls;
3604 break;
3605 case VOL_RMFILE:
3606 if (hfsmp->hfs_filecount != 0)
3607 --hfsmp->hfs_filecount;
3608 if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3609 --hfsmp->vcbNmFls;
3610 break;
3611 }
3612
3613 hfs_unlock_mount (hfsmp);
3614
3615 if (hfsmp->jnl) {
3616 hfs_flushvolumeheader(hfsmp, 0);
3617 }
3618
3619 return (0);
3620 }
3621
3622
3623 #if CONFIG_HFS_STD
3624 /* HFS Standard MDB flush */
static int
hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush)
{
	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
	struct filefork *fp;
	HFSMasterDirectoryBlock *mdb;
	struct buf *bp = NULL;
	int retval;
	int sector_size;
	ByteCount namelen;

	/*
	 * Read the whole sector containing the primary MDB so that bytes
	 * outside the MDB itself are written back unchanged.
	 */
	sector_size = hfsmp->hfs_logical_block_size;
	retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sector_size), sector_size, NOCRED, &bp);
	if (retval) {
		if (bp)
			buf_brelse(bp);
		return retval;
	}

	hfs_lock_mount (hfsmp);

	mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sector_size));

	/*
	 * Copy in-memory VCB state into the on-disk MDB, swapping to
	 * big-endian.  HFS Standard dates are stored in local time.
	 */
	mdb->drCrDate = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->hfs_itime)));
	mdb->drLsMod = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod)));
	mdb->drAtrb = SWAP_BE16 (vcb->vcbAtrb);
	mdb->drNmFls = SWAP_BE16 (vcb->vcbNmFls);
	mdb->drAllocPtr = SWAP_BE16 (vcb->nextAllocation);
	mdb->drClpSiz = SWAP_BE32 (vcb->vcbClpSiz);
	mdb->drNxtCNID = SWAP_BE32 (vcb->vcbNxtCNID);
	mdb->drFreeBks = SWAP_BE16 (vcb->freeBlocks);

	/* Convert the UTF-8 volume name back to the volume's HFS encoding. */
	namelen = strlen((char *)vcb->vcbVN);
	retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN);
	/* Retry with MacRoman in case that's how it was exported. */
	if (retval)
		retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN);

	mdb->drVolBkUp = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbVolBkUp)));
	mdb->drWrCnt = SWAP_BE32 (vcb->vcbWrCnt);
	mdb->drNmRtDirs = SWAP_BE16 (vcb->vcbNmRtDirs);
	mdb->drFilCnt = SWAP_BE32 (vcb->vcbFilCnt);
	mdb->drDirCnt = SWAP_BE32 (vcb->vcbDirCnt);

	bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo));

	/*
	 * Sync the extents overflow file's first three extents plus its
	 * sizes, then clear its cnode's modified flag.
	 */
	fp = VTOF(vcb->extentsRefNum);
	mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
	mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
	mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
	mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
	mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
	mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
	mdb->drXTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
	mdb->drXTClpSiz = SWAP_BE32 (fp->ff_clumpsize);
	FTOC(fp)->c_flag &= ~C_MODIFIED;

	/* Likewise for the catalog file. */
	fp = VTOF(vcb->catalogRefNum);
	mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
	mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
	mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
	mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
	mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
	mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
	mdb->drCTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
	mdb->drCTClpSiz = SWAP_BE32 (fp->ff_clumpsize);
	FTOC(fp)->c_flag &= ~C_MODIFIED;

	/* In-memory VCB now matches what is about to hit the disk. */
	MarkVCBClean( vcb );

	hfs_unlock_mount (hfsmp);

	/* If requested, flush out the alternate MDB */
	if (altflush) {
		struct buf *alt_bp = NULL;

		if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_partition_avh_sector, sector_size, NOCRED, &alt_bp) == 0) {
			bcopy(mdb, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sector_size), kMDBSize);

			(void) VNOP_BWRITE(alt_bp);
		} else if (alt_bp)
			buf_brelse(alt_bp);
	}

	/* Async write unless the caller demanded a synchronous flush. */
	if (waitfor != MNT_WAIT)
		buf_bawrite(bp);
	else
		retval = VNOP_BWRITE(bp);

	return (retval);
}
3716 #endif
3717
3718 /*
3719 * Flush any dirty in-memory mount data to the on-disk
3720 * volume header.
3721 *
3722 * Note: the on-disk volume signature is intentionally
3723 * not flushed since the on-disk "H+" and "HX" signatures
3724 * are always stored in-memory as "H+".
3725 */
3726 int
3727 hfs_flushvolumeheader(struct hfsmount *hfsmp,
3728 hfs_flush_volume_header_options_t options)
3729 {
3730 ExtendedVCB *vcb = HFSTOVCB(hfsmp);
3731 struct filefork *fp;
3732 HFSPlusVolumeHeader *volumeHeader, *altVH;
3733 int retval;
3734 struct buf *bp, *alt_bp;
3735 int i;
3736 daddr64_t priIDSector;
3737 bool critical = false;
3738 u_int16_t signature;
3739 u_int16_t hfsversion;
3740 daddr64_t avh_sector;
3741 bool altflush = ISSET(options, HFS_FVH_WRITE_ALT);
3742
3743 if (ISSET(options, HFS_FVH_FLUSH_IF_DIRTY)
3744 && !hfs_header_needs_flushing(hfsmp)) {
3745 return 0;
3746 }
3747
3748 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
3749 return(0);
3750 }
3751 #if CONFIG_HFS_STD
3752 if (hfsmp->hfs_flags & HFS_STANDARD) {
3753 return hfs_flushMDB(hfsmp, ISSET(options, HFS_FVH_WAIT) ? MNT_WAIT : 0, altflush);
3754 }
3755 #endif
3756 priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3757 HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
3758
3759 if (hfs_start_transaction(hfsmp) != 0) {
3760 return EINVAL;
3761 }
3762
3763 bp = NULL;
3764 alt_bp = NULL;
3765
3766 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3767 HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
3768 hfsmp->hfs_physical_block_size, NOCRED, &bp);
3769 if (retval) {
3770 printf("hfs: err %d reading VH blk (vol=%s)\n", retval, vcb->vcbVN);
3771 goto err_exit;
3772 }
3773
3774 volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) +
3775 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3776
3777 /*
3778 * Sanity check what we just read. If it's bad, try the alternate
3779 * instead.
3780 */
3781 signature = SWAP_BE16 (volumeHeader->signature);
3782 hfsversion = SWAP_BE16 (volumeHeader->version);
3783 if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3784 (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
3785 (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) {
3786 printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3787 vcb->vcbVN, signature, hfsversion,
3788 SWAP_BE32 (volumeHeader->blockSize));
3789 hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED);
3790
3791 /* Almost always we read AVH relative to the partition size */
3792 avh_sector = hfsmp->hfs_partition_avh_sector;
3793
3794 if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
3795 /*
3796 * The two altVH offsets do not match --- which means that a smaller file
3797 * system exists in a larger partition. Verify that we have the correct
3798 * alternate volume header sector as per the current parititon size.
3799 * The GPT device that we are mounted on top could have changed sizes
3800 * without us knowing.
3801 *
3802 * We're in a transaction, so it's safe to modify the partition_avh_sector
3803 * field if necessary.
3804 */
3805
3806 uint64_t sector_count;
3807
3808 /* Get underlying device block count */
3809 if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
3810 (caddr_t)&sector_count, 0, vfs_context_current()))) {
3811 printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
3812 retval = ENXIO;
3813 goto err_exit;
3814 }
3815
3816 /* Partition size was changed without our knowledge */
3817 if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
3818 hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3819 HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
3820 /* Note: hfs_fs_avh_sector will remain unchanged */
3821 printf ("hfs_flushVH: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
3822 hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
3823
3824 /*
3825 * We just updated the offset for AVH relative to
3826 * the partition size, so the content of that AVH
3827 * will be invalid. But since we are also maintaining
3828 * a valid AVH relative to the file system size, we
3829 * can read it since primary VH and partition AVH
3830 * are not valid.
3831 */
3832 avh_sector = hfsmp->hfs_fs_avh_sector;
3833 }
3834 }
3835
3836 printf ("hfs: trying alternate (for %s) avh_sector=%qu\n",
3837 (avh_sector == hfsmp->hfs_fs_avh_sector) ? "file system" : "partition", avh_sector);
3838
3839 if (avh_sector) {
3840 retval = buf_meta_bread(hfsmp->hfs_devvp,
3841 HFS_PHYSBLK_ROUNDDOWN(avh_sector, hfsmp->hfs_log_per_phys),
3842 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp);
3843 if (retval) {
3844 printf("hfs: err %d reading alternate VH (%s)\n", retval, vcb->vcbVN);
3845 goto err_exit;
3846 }
3847
3848 altVH = (HFSPlusVolumeHeader *)((char *)buf_dataptr(alt_bp) +
3849 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size));
3850 signature = SWAP_BE16(altVH->signature);
3851 hfsversion = SWAP_BE16(altVH->version);
3852
3853 if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3854 (hfsversion < kHFSPlusVersion) || (kHFSPlusVersion > 100) ||
3855 (SWAP_BE32(altVH->blockSize) != vcb->blockSize)) {
3856 printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3857 vcb->vcbVN, signature, hfsversion,
3858 SWAP_BE32(altVH->blockSize));
3859 retval = EIO;
3860 goto err_exit;
3861 }
3862
3863 /* The alternate is plausible, so use it. */
3864 bcopy(altVH, volumeHeader, kMDBSize);
3865 buf_brelse(alt_bp);
3866 alt_bp = NULL;
3867 } else {
3868 /* No alternate VH, nothing more we can do. */
3869 retval = EIO;
3870 goto err_exit;
3871 }
3872 }
3873
3874 if (hfsmp->jnl) {
3875 journal_modify_block_start(hfsmp->jnl, bp);
3876 }
3877
3878 /*
3879 * For embedded HFS+ volumes, update create date if it changed
3880 * (ie from a setattrlist call)
3881 */
3882 if ((vcb->hfsPlusIOPosOffset != 0) &&
3883 (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) {
3884 struct buf *bp2;
3885 HFSMasterDirectoryBlock *mdb;
3886
3887 retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3888 HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys),
3889 hfsmp->hfs_physical_block_size, NOCRED, &bp2);
3890 if (retval) {
3891 if (bp2)
3892 buf_brelse(bp2);
3893 retval = 0;
3894 } else {
3895 mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) +
3896 HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3897
3898 if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate )
3899 {
3900 if (hfsmp->jnl) {
3901 journal_modify_block_start(hfsmp->jnl, bp2);
3902 }
3903
3904 mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate); /* pick up the new create date */
3905
3906 if (hfsmp->jnl) {
3907 journal_modify_block_end(hfsmp->jnl, bp2, NULL, NULL);
3908 } else {
3909 (void) VNOP_BWRITE(bp2); /* write out the changes */
3910 }
3911 }
3912 else
3913 {
3914 buf_brelse(bp2); /* just release it */
3915 }
3916 }
3917 }
3918
3919 hfs_lock_mount (hfsmp);
3920
3921 /* Note: only update the lower 16 bits worth of attributes */
3922 volumeHeader->attributes = SWAP_BE32 (vcb->vcbAtrb);
3923 volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock);
3924 if (hfsmp->jnl) {
3925 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion);
3926 } else {
3927 volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion);
3928 }
3929 volumeHeader->createDate = SWAP_BE32 (vcb->localCreateDate); /* volume create date is in local time */
3930 volumeHeader->modifyDate = SWAP_BE32 (to_hfs_time(vcb->vcbLsMod));
3931 volumeHeader->backupDate = SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp));
3932 volumeHeader->fileCount = SWAP_BE32 (vcb->vcbFilCnt);
3933 volumeHeader->folderCount = SWAP_BE32 (vcb->vcbDirCnt);
3934 volumeHeader->totalBlocks = SWAP_BE32 (vcb->totalBlocks);
3935 volumeHeader->freeBlocks = SWAP_BE32 (vcb->freeBlocks + vcb->reclaimBlocks);
3936 volumeHeader->nextAllocation = SWAP_BE32 (vcb->nextAllocation);
3937 volumeHeader->rsrcClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
3938 volumeHeader->dataClumpSize = SWAP_BE32 (vcb->vcbClpSiz);
3939 volumeHeader->nextCatalogID = SWAP_BE32 (vcb->vcbNxtCNID);
3940 volumeHeader->writeCount = SWAP_BE32 (vcb->vcbWrCnt);
3941 volumeHeader->encodingsBitmap = SWAP_BE64 (vcb->encodingsBitmap);
3942
3943 if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) {
3944 bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo));
3945 critical = true;
3946 }
3947
3948 if (!altflush && !ISSET(options, HFS_FVH_FLUSH_IF_DIRTY)) {
3949 goto done;
3950 }
3951
3952 /* Sync Extents over-flow file meta data */
3953 fp = VTOF(vcb->extentsRefNum);
3954 if (FTOC(fp)->c_flag & C_MODIFIED) {
3955 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3956 volumeHeader->extentsFile.extents[i].startBlock =
3957 SWAP_BE32 (fp->ff_extents[i].startBlock);
3958 volumeHeader->extentsFile.extents[i].blockCount =
3959 SWAP_BE32 (fp->ff_extents[i].blockCount);
3960 }
3961 volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size);
3962 volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3963 volumeHeader->extentsFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3964 FTOC(fp)->c_flag &= ~C_MODIFIED;
3965 altflush = true;
3966 }
3967
3968 /* Sync Catalog file meta data */
3969 fp = VTOF(vcb->catalogRefNum);
3970 if (FTOC(fp)->c_flag & C_MODIFIED) {
3971 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3972 volumeHeader->catalogFile.extents[i].startBlock =
3973 SWAP_BE32 (fp->ff_extents[i].startBlock);
3974 volumeHeader->catalogFile.extents[i].blockCount =
3975 SWAP_BE32 (fp->ff_extents[i].blockCount);
3976 }
3977 volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size);
3978 volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3979 volumeHeader->catalogFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3980 FTOC(fp)->c_flag &= ~C_MODIFIED;
3981 altflush = true;
3982 }
3983
3984 /* Sync Allocation file meta data */
3985 fp = VTOF(vcb->allocationsRefNum);
3986 if (FTOC(fp)->c_flag & C_MODIFIED) {
3987 for (i = 0; i < kHFSPlusExtentDensity; i++) {
3988 volumeHeader->allocationFile.extents[i].startBlock =
3989 SWAP_BE32 (fp->ff_extents[i].startBlock);
3990 volumeHeader->allocationFile.extents[i].blockCount =
3991 SWAP_BE32 (fp->ff_extents[i].blockCount);
3992 }
3993 volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size);
3994 volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3995 volumeHeader->allocationFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
3996 FTOC(fp)->c_flag &= ~C_MODIFIED;
3997 altflush = true;
3998 }
3999
4000 /* Sync Attribute file meta data */
4001 if (hfsmp->hfs_attribute_vp) {
4002 fp = VTOF(hfsmp->hfs_attribute_vp);
4003 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4004 volumeHeader->attributesFile.extents[i].startBlock =
4005 SWAP_BE32 (fp->ff_extents[i].startBlock);
4006 volumeHeader->attributesFile.extents[i].blockCount =
4007 SWAP_BE32 (fp->ff_extents[i].blockCount);
4008 }
4009 if (ISSET(FTOC(fp)->c_flag, C_MODIFIED)) {
4010 FTOC(fp)->c_flag &= ~C_MODIFIED;
4011 altflush = true;
4012 }
4013 volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size);
4014 volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
4015 volumeHeader->attributesFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
4016 }
4017
4018 /* Sync Startup file meta data */
4019 if (hfsmp->hfs_startup_vp) {
4020 fp = VTOF(hfsmp->hfs_startup_vp);
4021 if (FTOC(fp)->c_flag & C_MODIFIED) {
4022 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4023 volumeHeader->startupFile.extents[i].startBlock =
4024 SWAP_BE32 (fp->ff_extents[i].startBlock);
4025 volumeHeader->startupFile.extents[i].blockCount =
4026 SWAP_BE32 (fp->ff_extents[i].blockCount);
4027 }
4028 volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size);
4029 volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
4030 volumeHeader->startupFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize);
4031 FTOC(fp)->c_flag &= ~C_MODIFIED;
4032 altflush = true;
4033 }
4034 }
4035
4036 if (altflush)
4037 critical = true;
4038
4039 done:
4040 MarkVCBClean(hfsmp);
4041 hfs_unlock_mount (hfsmp);
4042
4043 /* If requested, flush out the alternate volume header */
4044 if (altflush) {
4045 /*
4046 * The two altVH offsets do not match --- which means that a smaller file
4047 * system exists in a larger partition. Verify that we have the correct
4048 * alternate volume header sector as per the current partition size.
4049 * The GPT device that we are mounted on top could have changed sizes
4050 * without us knowing.
4051 *
4052 * We're in a transaction, so it's safe to modify the partition_avh_sector
4053 * field if necessary.
4054 */
4055 if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
4056 uint64_t sector_count;
4057
4058 /* Get underlying device block count */
4059 if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
4060 (caddr_t)&sector_count, 0, vfs_context_current()))) {
4061 printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
4062 retval = ENXIO;
4063 goto err_exit;
4064 }
4065
4066 /* Partition size was changed without our knowledge */
4067 if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
4068 hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
4069 HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
4070 /* Note: hfs_fs_avh_sector will remain unchanged */
4071 printf ("hfs_flushVH: altflush: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
4072 hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
4073 }
4074 }
4075
4076 /*
4077 * First see if we need to write I/O to the "secondary" AVH
4078 * located at FS Size - 1024 bytes, because this one will
4079 * always go into the journal. We put this AVH into the journal
4080 * because even if the filesystem size has shrunk, this LBA should be
4081 * reachable after the partition-size modification has occurred.
4082 * The one where we need to be careful is partitionsize-1024, since the
4083 * partition size should hopefully shrink.
4084 *
4085 * Most of the time this block will not execute.
4086 */
4087 if ((hfsmp->hfs_fs_avh_sector) &&
4088 (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
4089 if (buf_meta_bread(hfsmp->hfs_devvp,
4090 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
4091 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4092 if (hfsmp->jnl) {
4093 journal_modify_block_start(hfsmp->jnl, alt_bp);
4094 }
4095
4096 bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4097 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4098 kMDBSize);
4099
4100 if (hfsmp->jnl) {
4101 journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
4102 } else {
4103 (void) VNOP_BWRITE(alt_bp);
4104 }
4105 } else if (alt_bp) {
4106 buf_brelse(alt_bp);
4107 }
4108 }
4109
4110 /*
4111 * Flush out alternate volume header located at 1024 bytes before
4112 * end of the partition as part of journal transaction. In
4113 * most cases, this will be the only alternate volume header
4114 * that we need to worry about because the file system size is
4115 * same as the partition size, therefore hfs_fs_avh_sector is
4116 * same as hfs_partition_avh_sector. This is the "priority" AVH.
4117 *
4118 * However, do not always put this I/O into the journal. If we skipped the
4119 * FS-Size AVH write above, then we will put this I/O into the journal as
4120 * that indicates the two were in sync. However, if the FS size is
4121 * not the same as the partition size, we are tracking two. We don't
4122 * put it in the journal in that case, since if the partition
4123 * size changes between uptimes, and we need to replay the journal,
4124 * this I/O could generate an EIO if during replay it is now trying
4125 * to access blocks beyond the device EOF.
4126 */
4127 if (hfsmp->hfs_partition_avh_sector) {
4128 if (buf_meta_bread(hfsmp->hfs_devvp,
4129 HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
4130 hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4131
4132 /* only one AVH, put this I/O in the journal. */
4133 if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4134 journal_modify_block_start(hfsmp->jnl, alt_bp);
4135 }
4136
4137 bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4138 HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4139 kMDBSize);
4140
4141 /* If journaled and we only have one AVH to track */
4142 if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4143 journal_modify_block_end (hfsmp->jnl, alt_bp, NULL, NULL);
4144 } else {
4145 /*
4146 * If we don't have a journal or there are two AVH's at the
4147 * moment, then this one doesn't go in the journal. Note that
4148 * this one may generate I/O errors, since the partition
4149 * can be resized behind our backs at any moment and this I/O
4150 * may now appear to be beyond the device EOF.
4151 */
4152 (void) VNOP_BWRITE(alt_bp);
4153 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
4154 }
4155 } else if (alt_bp) {
4156 buf_brelse(alt_bp);
4157 }
4158 }
4159 }
4160
4161 /* Finish modifying the block for the primary VH */
4162 if (hfsmp->jnl) {
4163 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
4164 } else {
4165 if (!ISSET(options, HFS_FVH_WAIT)) {
4166 buf_bawrite(bp);
4167 } else {
4168 retval = VNOP_BWRITE(bp);
4169 /* When critical data changes, flush the device cache */
4170 if (critical && (retval == 0)) {
4171 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
4172 }
4173 }
4174 }
4175 hfs_end_transaction(hfsmp);
4176
4177 return (retval);
4178
4179 err_exit:
4180 if (alt_bp)
4181 buf_brelse(alt_bp);
4182 if (bp)
4183 buf_brelse(bp);
4184 hfs_end_transaction(hfsmp);
4185 return retval;
4186 }
4187
4188
4189 /*
4190 * Creates a UUID from a unique "name" in the HFS UUID Name space.
4191 * See version 3 UUID.
4192 */
4193 static void
4194 hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result)
4195 {
4196 MD5_CTX md5c;
4197 uint8_t rawUUID[8];
4198
4199 ((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6];
4200 ((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7];
4201
4202 MD5Init( &md5c );
4203 MD5Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) );
4204 MD5Update( &md5c, rawUUID, sizeof (rawUUID) );
4205 MD5Final( result, &md5c );
4206
4207 result[6] = 0x30 | ( result[6] & 0x0F );
4208 result[8] = 0x80 | ( result[8] & 0x3F );
4209 }
4210
/*
 * Get file system attributes.
 *
 * VFS_GETATTR entry point: fills in the requested fields of 'fsap' from
 * the in-memory VCB/hfsmount state.  Returns 0 unconditionally; fields
 * that cannot be supplied are simply left unmarked as supported.
 */
static int
hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
{
	/* File attrs HFS cannot report: filetype, fork count, fork list. */
#define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
	/* HFS does not persist an access time at the volume level. */
#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_ACCTIME))

	ExtendedVCB *vcb = VFSTOVCB(mp);
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/* Runtime availability of searchfs/exchangedata for this mount. */
	int searchfs_on = 0;
	int exchangedata_on = 1;

#if CONFIG_SEARCHFS
	searchfs_on = 1;
#endif

#if CONFIG_PROTECT
	/* exchangedata is disallowed on content-protected volumes. */
	if (cp_fs_protected(mp)) {
		exchangedata_on = 0;
	}
#endif

	/* Object/block/file counts straight from the VCB. */
	VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
	VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
	VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
	VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
	VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
	VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
	VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
	VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
	/* XXX needs clarification */
	VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
	VFSATTR_RETURN(fsap, f_files, (u_int64_t)HFS_MAX_FILES);
	VFSATTR_RETURN(fsap, f_ffree, (u_int64_t)hfs_free_cnids(hfsmp));

	fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
	fsap->f_fsid.val[1] = vfs_typenum(mp);
	VFSATTR_SET_SUPPORTED(fsap, f_fsid);

	VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
	VFSATTR_RETURN(fsap, f_carbon_fsid, 0);

	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
		vol_capabilities_attr_t *cap;

		cap = &fsap->f_capabilities;

		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			/* HFS+ & variants */
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_SYMBOLICLINKS |
				VOL_CAP_FMT_HARDLINKS |
				VOL_CAP_FMT_JOURNAL |
				VOL_CAP_FMT_ZERO_RUNS |
				(hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
				(hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_2TB_FILESIZE |
				VOL_CAP_FMT_HIDDEN_FILES |
#if HFS_COMPRESSION
				VOL_CAP_FMT_DECMPFS_COMPRESSION |
#endif
#if CONFIG_HFS_DIRLINK
				VOL_CAP_FMT_DIR_HARDLINKS |
#endif
				VOL_CAP_FMT_PATH_FROM_ID;
		}
#if CONFIG_HFS_STD
		else {
			/* HFS standard */
			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
				VOL_CAP_FMT_CASE_PRESERVING |
				VOL_CAP_FMT_FAST_STATFS |
				VOL_CAP_FMT_HIDDEN_FILES |
				VOL_CAP_FMT_PATH_FROM_ID;
		}
#endif

		/*
		 * The capabilities word in 'cap' tell you whether or not
		 * this particular filesystem instance has feature X enabled.
		 */

		cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
			VOL_CAP_INT_FLOCK |
#if VOL_CAP_INT_RENAME_EXCL
			VOL_CAP_INT_RENAME_EXCL |
#endif
#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS may conditionally support searchfs and exchangedata depending on the runtime */

		if (searchfs_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_SEARCHFS;
		}
		if (exchangedata_on) {
			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXCHANGEDATA;
		}

		cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;

		cap->valid[VOL_CAPABILITIES_FORMAT] =
			VOL_CAP_FMT_PERSISTENTOBJECTIDS |
			VOL_CAP_FMT_SYMBOLICLINKS |
			VOL_CAP_FMT_HARDLINKS |
			VOL_CAP_FMT_JOURNAL |
			VOL_CAP_FMT_JOURNAL_ACTIVE |
			VOL_CAP_FMT_NO_ROOT_TIMES |
			VOL_CAP_FMT_SPARSE_FILES |
			VOL_CAP_FMT_ZERO_RUNS |
			VOL_CAP_FMT_CASE_SENSITIVE |
			VOL_CAP_FMT_CASE_PRESERVING |
			VOL_CAP_FMT_FAST_STATFS |
			VOL_CAP_FMT_2TB_FILESIZE |
			VOL_CAP_FMT_OPENDENYMODES |
			VOL_CAP_FMT_HIDDEN_FILES |
			VOL_CAP_FMT_PATH_FROM_ID |
			VOL_CAP_FMT_DECMPFS_COMPRESSION |
			VOL_CAP_FMT_DIR_HARDLINKS;

		/*
		 * Bits in the "valid" field tell you whether or not the on-disk
		 * format supports feature X.
		 */

		cap->valid[VOL_CAPABILITIES_INTERFACES] =
			VOL_CAP_INT_ATTRLIST |
			VOL_CAP_INT_NFSEXPORT |
			VOL_CAP_INT_READDIRATTR |
			VOL_CAP_INT_COPYFILE |
			VOL_CAP_INT_ALLOCATE |
			VOL_CAP_INT_VOL_RENAME |
			VOL_CAP_INT_ADVLOCK |
			VOL_CAP_INT_FLOCK |
			VOL_CAP_INT_MANLOCK |
#if VOL_CAP_INT_RENAME_EXCL
			VOL_CAP_INT_RENAME_EXCL |
#endif

#if NAMEDSTREAMS
			VOL_CAP_INT_EXTENDED_ATTR |
			VOL_CAP_INT_NAMEDSTREAMS;
#else
			VOL_CAP_INT_EXTENDED_ATTR;
#endif

		/* HFS always supports exchangedata and searchfs in the on-disk format natively */
		cap->valid[VOL_CAPABILITIES_INTERFACES] |= (VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_EXCHANGEDATA);


		cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
		cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
		vol_attributes_attr_t *attrp = &fsap->f_attributes;

		/* ATTR_VOL_INFO is a flag, not a reportable attribute. */
		attrp->validattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
		attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->validattr.forkattr = 0;

		attrp->nativeattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
		attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->nativeattr.forkattr = 0;
		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
	}
	/* Volume timestamps (seconds resolution only). */
	fsap->f_create_time.tv_sec = hfsmp->hfs_itime;
	fsap->f_create_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_create_time);
	fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
	fsap->f_modify_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_modify_time);

	fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
	fsap->f_backup_time.tv_nsec = 0;
	VFSATTR_SET_SUPPORTED(fsap, f_backup_time);
	if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
		u_int16_t subtype = 0;

		/*
		 * Subtypes (flavors) for HFS
		 *   0:   Mac OS Extended
		 *   1:   Mac OS Extended (Journaled)
		 *   2:   Mac OS Extended (Case Sensitive)
		 *   3:   Mac OS Extended (Case Sensitive, Journaled)
		 *   4 - 127: Reserved
		 *   128: Mac OS Standard
		 *
		 */
		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
			if (hfsmp->jnl) {
				subtype |= HFS_SUBTYPE_JOURNALED;
			}
			if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
				subtype |= HFS_SUBTYPE_CASESENSITIVE;
			}
		}
#if CONFIG_HFS_STD
		else {
			subtype = HFS_SUBTYPE_STANDARDHFS;
		}
#endif
		fsap->f_fssubtype = subtype;
		VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
	}

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_uuid)) {
		hfs_getvoluuid(hfsmp, fsap->f_uuid);
		VFSATTR_SET_SUPPORTED(fsap, f_uuid);
	}
	return (0);
}
4450
/*
 * Perform a volume rename. Requires the FS' root vp.
 *
 * Renames the root folder in the catalog and updates the in-memory
 * volume name.  A zero-length name is silently ignored (returns 0).
 * Lock order here: cnode lock -> transaction -> catalog lock.
 */
static int
hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
{
	ExtendedVCB *vcb = VTOVCB(vp);
	struct cnode *cp = VTOC(vp);
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct cat_desc to_desc;
	struct cat_desc todir_desc;
	struct cat_desc new_desc;
	cat_cookie_t cookie;
	int lockflags;
	int error = 0;
	char converted_volname[256];
	size_t volname_length = 0;
	size_t conv_volname_length = 0;


	/*
	 * Ignore attempts to rename a volume to a zero-length name.
	 */
	if (name[0] == 0)
		return(0);

	bzero(&to_desc, sizeof(to_desc));
	bzero(&todir_desc, sizeof(todir_desc));
	bzero(&new_desc, sizeof(new_desc));
	bzero(&cookie, sizeof(cookie));

	/* Destination directory: the (pseudo) root parent. */
	todir_desc.cd_parentcnid = kHFSRootParentID;
	todir_desc.cd_cnid = kHFSRootFolderID;
	todir_desc.cd_flags = CD_ISDIR;

	/* Target descriptor: same root folder CNID, new name. */
	to_desc.cd_nameptr = (const u_int8_t *)name;
	to_desc.cd_namelen = strlen(name);
	to_desc.cd_parentcnid = kHFSRootParentID;
	to_desc.cd_cnid = cp->c_cnid;
	to_desc.cd_flags = CD_ISDIR;

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
		if ((error = hfs_start_transaction(hfsmp)) == 0) {
			if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

				error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);

				/*
				 * If successful, update the name in the VCB, ensure it's terminated.
				 */
				if (error == 0) {
					strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));

					volname_length = strlen ((const char*)vcb->vcbVN);
					/* Send the volume name down to CoreStorage if necessary */
					error = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t*)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED);
					if (error == 0) {
						(void) VNOP_IOCTL (hfsmp->hfs_devvp, _DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current());
					}
					/* The CoreStorage notification is best-effort; don't fail the rename. */
					error = 0;
				}

				hfs_systemfile_unlock(hfsmp, lockflags);
				cat_postflight(hfsmp, &cookie, p);

				/*
				 * NOTE(review): the VCB is marked dirty only on error here —
				 * presumably to force a later flush after a partial failure;
				 * confirm intent before changing.
				 */
				if (error)
					MarkVCBDirty(vcb);
				(void) hfs_flushvolumeheader(hfsmp, HFS_FVH_WAIT);
			}
			hfs_end_transaction(hfsmp);
		}
		if (!error) {
			/* Release old allocated name buffer */
			if (cp->c_desc.cd_flags & CD_HASBUF) {
				const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;

				cp->c_desc.cd_nameptr = 0;
				cp->c_desc.cd_namelen = 0;
				cp->c_desc.cd_flags &= ~CD_HASBUF;
				vfs_removename(tmp_name);
			}
			/* Update cnode's catalog descriptor */
			replace_desc(cp, &new_desc);
			vcb->volumeNameEncodingHint = new_desc.cd_encoding;
			cp->c_touch_chgtime = TRUE;
		}

		hfs_unlock(cp);
	}

	return(error);
}
4544
/*
 * Set file system attributes.
 *
 * Only the volume name (f_vol_name) is settable; all other active
 * attributes are left unmarked as supported.  Caller must be superuser
 * or the filesystem owner.
 */
static int
hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, vfs_context_t context)
{
	kauth_cred_t cred = vfs_context_ucred(context);
	int error = 0;

	/*
	 * Must be superuser or owner of filesystem to change volume attributes
	 */
	if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
		return(EACCES);

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		vnode_t root_vp;

		/* The rename path needs the root vnode; grab and drop an iocount. */
		error = hfs_vfs_root(mp, &root_vp, context);
		if (error)
			goto out;

		error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
		(void) vnode_put(root_vp);
		if (error)
			goto out;

		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}

out:
	return error;
}
4578
/* If a runtime corruption is detected, set the volume inconsistent
 * bit in the volume attributes. The volume inconsistent bit is a persistent
 * bit which represents that the volume is corrupt and needs repair.
 * The volume inconsistent bit can be set from the kernel when it detects
 * runtime corruption or from file system repair utilities like fsck_hfs when
 * a repair operation fails. The bit should be cleared only from file system
 * verify/repair utility like fsck_hfs when a verify/repair succeeds.
 */
void hfs_mark_inconsistent(struct hfsmount *hfsmp,
						   hfs_inconsistency_reason_t reason)
{
	hfs_lock_mount (hfsmp);
	/* Set the persistent inconsistent bit once and mark the VCB dirty
	 * so the change reaches the on-disk volume header. */
	if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
		hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
		MarkVCBDirty(hfsmp);
	}
	/* Log the reason only on read-write mounts (on a read-only mount
	 * the bit cannot be persisted anyway). */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY)==0) {
		switch (reason) {
		case HFS_INCONSISTENCY_DETECTED:
			printf("hfs_mark_inconsistent: Runtime corruption detected on %s, fsck will be forced on next mount.\n",
				   hfsmp->vcbVN);
			break;
		case HFS_ROLLBACK_FAILED:
			printf("hfs_mark_inconsistent: Failed to roll back; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
				   hfsmp->vcbVN);
			break;
		case HFS_OP_INCOMPLETE:
			printf("hfs_mark_inconsistent: Failed to complete operation; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
				   hfsmp->vcbVN);
			break;
		case HFS_FSCK_FORCED:
			printf("hfs_mark_inconsistent: fsck requested for `%s'; fsck will be forced on next mount.\n",
				   hfsmp->vcbVN);
			break;
		}
	}
	hfs_unlock_mount (hfsmp);
}
4617
4618 /* Replay the journal on the device node provided. Returns zero if
4619 * journal replay succeeded or no journal was supposed to be replayed.
4620 */
4621 static int hfs_journal_replay(vnode_t devvp, vfs_context_t context)
4622 {
4623 int retval = 0;
4624 int error = 0;
4625
4626 /* Replay allowed only on raw devices */
4627 if (!vnode_ischr(devvp) && !vnode_isblk(devvp))
4628 return EINVAL;
4629
4630 retval = hfs_mountfs(devvp, NULL, NULL, /* journal_replay_only: */ 1, context);
4631 buf_flushdirtyblks(devvp, TRUE, 0, "hfs_journal_replay");
4632
4633 /* FSYNC the devnode to be sure all data has been flushed */
4634 error = VNOP_FSYNC(devvp, MNT_WAIT, context);
4635 if (error) {
4636 retval = error;
4637 }
4638
4639 return retval;
4640 }
4641
4642
4643 /*
4644 * Cancel the syncer
4645 */
4646 static void
4647 hfs_syncer_free(struct hfsmount *hfsmp)
4648 {
4649 if (hfsmp && ISSET(hfsmp->hfs_flags, HFS_RUN_SYNCER)) {
4650 hfs_syncer_lock(hfsmp);
4651 CLR(hfsmp->hfs_flags, HFS_RUN_SYNCER);
4652 hfs_syncer_unlock(hfsmp);
4653
4654 // Wait for the syncer thread to finish
4655 if (hfsmp->hfs_syncer_thread) {
4656 hfs_syncer_wakeup(hfsmp);
4657 hfs_syncer_lock(hfsmp);
4658 while (hfsmp->hfs_syncer_thread)
4659 hfs_syncer_wait(hfsmp, NULL);
4660 hfs_syncer_unlock(hfsmp);
4661 }
4662 }
4663 }
4664
4665 static int hfs_vfs_ioctl(struct mount *mp, u_long command, caddr_t data,
4666 __unused int flags, __unused vfs_context_t context)
4667 {
4668 switch (command) {
4669 #if CONFIG_PROTECT
4670 case FIODEVICELOCKED:
4671 cp_device_locked_callback(mp, (cp_lock_state_t)data);
4672 return 0;
4673 #endif
4674 }
4675 return ENOTTY;
4676 }
4677
/*
 * hfs vfs operations.
 *
 * VFS plugin dispatch table registered with the kernel; entries not
 * listed use the VFS defaults.
 */
struct vfsops hfs_vfsops = {
	.vfs_mount = hfs_mount,             /* mount/remount a volume */
	.vfs_start = hfs_start,
	.vfs_unmount = hfs_unmount,
	.vfs_root = hfs_vfs_root,           /* return the root vnode */
	.vfs_quotactl = hfs_quotactl,
	.vfs_getattr = hfs_vfs_getattr,     /* see hfs_vfs_getattr above */
	.vfs_sync = hfs_sync,
	.vfs_vget = hfs_vfs_vget,           /* look up a vnode by file id */
	.vfs_fhtovp = hfs_fhtovp,           /* NFS file handle -> vnode */
	.vfs_vptofh = hfs_vptofh,           /* vnode -> NFS file handle */
	.vfs_init = hfs_init,
	.vfs_sysctl = hfs_sysctl,
	.vfs_setattr = hfs_vfs_setattr,     /* volume rename only */
	.vfs_ioctl = hfs_vfs_ioctl,
};