/*
 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1991, 1993, 1994
 * The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * hfs_vfsops.c
 * derived from @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
 *
 * (c) Copyright 1997-2002 Apple Computer, Inc. All rights reserved.
 *
 * hfs_vfsops.c -- VFS layer for loadable HFS file system.
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kauth.h>

#include <sys/ubc.h>
#include <sys/ubc_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/quota.h>
#include <sys/disk.h>
#include <sys/paths.h>
#include <sys/utfconv.h>
#include <sys/kdebug.h>
#include <sys/fslog.h>
#include <sys/ubc.h>
#include <sys/buf_internal.h>

/* for parsing boot-args */
#include <pexpert/pexpert.h>


#include <kern/locks.h>

#include <vfs/vfs_journal.h>

#include <miscfs/specfs/specdev.h>
#include <hfs/hfs_mount.h>

#include <libkern/crypto/md5.h>
#include <uuid/uuid.h>

#include "hfs.h"
#include "hfs_catalog.h"
#include "hfs_cnode.h"
#include "hfs_dbg.h"
#include "hfs_endian.h"
#include "hfs_hotfiles.h"
#include "hfs_quota.h"
#include "hfs_btreeio.h"
#include "hfs_kdebug.h"

#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/BTreesInternal.h"

#if CONFIG_PROTECT
#include <sys/cprotect.h>
#endif

#define HFS_MOUNT_DEBUG 1

#if HFS_DIAGNOSTIC
int hfs_dbg_all = 0;
int hfs_dbg_err = 0;
#endif

/* Enable/disable debugging code for live volume resizing, defined in hfs_resize.c */
extern int hfs_resize_debug;

lck_grp_attr_t * hfs_group_attr;
lck_attr_t * hfs_lock_attr;
lck_grp_t * hfs_mutex_group;
lck_grp_t * hfs_rwlock_group;
lck_grp_t * hfs_spinlock_group;

extern struct vnodeopv_desc hfs_vnodeop_opv_desc;

#if CONFIG_HFS_STD
extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc;
static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);
#endif

/* not static so we can re-use in hfs_readwrite.c for build_path calls */
int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);

static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
static int hfs_flushfiles(struct mount *, int, struct proc *);
static int hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp);
static int hfs_init(struct vfsconf *vfsp);
static void hfs_locks_destroy(struct hfsmount *hfsmp);
static int hfs_vfs_root(struct mount *mp, struct vnode **vpp, vfs_context_t context);
static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
static int hfs_journal_replay(vnode_t devvp, vfs_context_t context);
static void hfs_syncer_free(struct hfsmount *hfsmp);

void hfs_initialize_allocator (struct hfsmount *hfsmp);
int hfs_teardown_allocator (struct hfsmount *hfsmp);

int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context);
int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context);
int hfs_reload(struct mount *mp);
int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
               user_addr_t newp, size_t newlen, vfs_context_t context);
int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);

/*
 * Called by vfs_mountroot when mounting HFS Plus as root.
 */

int
hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context)
{
    struct hfsmount *hfsmp;
    ExtendedVCB *vcb;
    struct vfsstatfs *vfsp;
    int error;

    if ((error = hfs_mountfs(rvp, mp, NULL, 0, context))) {
        if (HFS_MOUNT_DEBUG) {
            printf("hfs_mountroot: hfs_mountfs returned %d, rvp (%p) name (%s) \n",
                    error, rvp, (rvp->v_name ? rvp->v_name : "unknown device"));
        }
        return (error);
    }

    /* Init hfsmp */
    hfsmp = VFSTOHFS(mp);

    hfsmp->hfs_uid = UNKNOWNUID;
    hfsmp->hfs_gid = UNKNOWNGID;
    hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
    hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */

    /* Establish the free block reserve. */
    vcb = HFSTOVCB(hfsmp);
    vcb->reserveBlocks = ((u_int64_t)vcb->totalBlocks * HFS_MINFREE) / 100;
    vcb->reserveBlocks = MIN(vcb->reserveBlocks, HFS_MAXRESERVE / vcb->blockSize);
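    /*
     * Worked example (illustrative numbers only; the real values come from
     * HFS_MINFREE and HFS_MAXRESERVE in the HFS headers, assumed here to be
     * a 1 percent minimum free reserve and a 250 MiB cap): a volume with
     * 26,214,400 allocation blocks of 4 KiB (100 GiB) first computes a
     * reserve of 262,144 blocks (1 GiB), which the MIN() above then caps at
     * 250 MiB / 4 KiB = 64,000 blocks.
     */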

    vfsp = vfs_statfs(mp);
    (void)hfs_statfs(mp, vfsp, NULL);

    /* Invoke ioctl that asks if the underlying device is Core Storage or not */
    error = VNOP_IOCTL(rvp, _DKIOCCORESTORAGE, NULL, 0, context);
    if (error == 0) {
        hfsmp->hfs_flags |= HFS_CS;
    }
    return (0);
}


/*
 * VFS Operations.
 *
 * mount system call
 */

int
hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
{
    struct proc *p = vfs_context_proc(context);
    struct hfsmount *hfsmp = NULL;
    struct hfs_mount_args args;
    int retval = E_NONE;
    u_int32_t cmdflags;

    if ((retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
        if (HFS_MOUNT_DEBUG) {
            printf("hfs_mount: copyin returned %d for fs\n", retval);
        }
        return (retval);
    }
    cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
    if (cmdflags & MNT_UPDATE) {
        hfsmp = VFSTOHFS(mp);

        /* Reload incore data after an fsck. */
        if (cmdflags & MNT_RELOAD) {
            if (vfs_isrdonly(mp)) {
                int error = hfs_reload(mp);
                if (error && HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: hfs_reload returned %d on %s \n", error, hfsmp->vcbVN);
                }
                return error;
            }
            else {
                if (HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp->vcbVN);
                }
                return (EINVAL);
            }
        }

        /* Change to a read-only file system. */
        if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
            vfs_isrdonly(mp)) {
            int flags;

            /* Set flag to indicate that a downgrade to read-only
             * is in progress and therefore block any further
             * modifications to the file system.
             */
            hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
            hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
            hfsmp->hfs_downgrading_thread = current_thread();
            hfs_unlock_global (hfsmp);
            hfs_syncer_free(hfsmp);

            /* use VFS_SYNC to push out System (btree) files */
            retval = VFS_SYNC(mp, MNT_WAIT, context);
            if (retval && ((cmdflags & MNT_FORCE) == 0)) {
                hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
                hfsmp->hfs_downgrading_thread = NULL;
                if (HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval, hfsmp->vcbVN);
                }
                goto out;
            }

            flags = WRITECLOSE;
            if (cmdflags & MNT_FORCE)
                flags |= FORCECLOSE;

            if ((retval = hfs_flushfiles(mp, flags, p))) {
                hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
                hfsmp->hfs_downgrading_thread = NULL;
                if (HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval, hfsmp->vcbVN);
                }
                goto out;
            }

            /* mark the volume cleanly unmounted */
            hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
            retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
            hfsmp->hfs_flags |= HFS_READ_ONLY;
            /*
             * Close down the journal.
             *
             * NOTE: It is critically important to close down the journal
             * and have it issue all pending I/O prior to calling VNOP_FSYNC below.
             * In a journaled environment it is expected that the journal be
             * the only actor permitted to issue I/O for metadata blocks in HFS.
             * If we were to call VNOP_FSYNC prior to closing down the journal,
             * we would inadvertently issue (and wait for) the I/O we just
             * initiated above as part of the flushvolumeheader call.
             *
             * To avoid this, we follow the same order of operations as in
             * unmount and issue the journal_close prior to calling VNOP_FSYNC.
             */

            if (hfsmp->jnl) {
                hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);

                journal_close(hfsmp->jnl);
                hfsmp->jnl = NULL;

                // Note: we explicitly don't want to shutdown
                // access to the jvp because we may need
                // it later if we go back to being read-write.

                hfs_unlock_global (hfsmp);

                vfs_clearflags(hfsmp->hfs_mp, MNT_JOURNALED);
            }

            /*
             * Write out any pending I/O still outstanding against the device node
             * now that the journal has been closed.
             */
            if (retval == 0) {
                vnode_get(hfsmp->hfs_devvp);
                retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
                vnode_put(hfsmp->hfs_devvp);
            }

            if (retval) {
                if (HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval, hfsmp->vcbVN);
                }
                hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
                hfsmp->hfs_downgrading_thread = NULL;
                hfsmp->hfs_flags &= ~HFS_READ_ONLY;
                goto out;
            }

            if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
                if (hfsmp->hfs_summary_table) {
                    int err = 0;
                    /*
                     * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
                     */
                    if (hfsmp->hfs_allocation_vp) {
                        err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
                    }
                    FREE (hfsmp->hfs_summary_table, M_TEMP);
                    hfsmp->hfs_summary_table = NULL;
                    hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;
                    if (err == 0 && hfsmp->hfs_allocation_vp){
                        hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
                    }
                }
            }

            hfsmp->hfs_downgrading_thread = NULL;
        }

        /* Change to a writable file system. */
        if (vfs_iswriteupgrade(mp)) {
            /*
             * On inconsistent disks, do not allow read-write mount
             * unless it is the boot volume being mounted.
             */
            if (!(vfs_flags(mp) & MNT_ROOTFS) &&
                (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
                if (HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n", (hfsmp->vcbVN));
                }
                retval = EINVAL;
                goto out;
            }

            // If the journal was shut-down previously because we were
            // asked to be read-only, let's start it back up again now

            if ( (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
                && hfsmp->jnl == NULL
                && hfsmp->jvp != NULL) {
                int jflags;

                if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
                    jflags = JOURNAL_RESET;
                } else {
                    jflags = 0;
                }

                hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);

                /* We provide the mount point twice here: The first is used as
                 * an opaque argument to be passed back when hfs_sync_metadata
                 * is called. The second is provided to the throttling code to
                 * indicate which mount's device should be used when accounting
                 * for metadata writes.
                 */
                hfsmp->jnl = journal_open(hfsmp->jvp,
                        (hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
                        hfsmp->jnl_size,
                        hfsmp->hfs_devvp,
                        hfsmp->hfs_logical_block_size,
                        jflags,
                        0,
                        hfs_sync_metadata, hfsmp->hfs_mp,
                        hfsmp->hfs_mp);

                /*
                 * Set up the trim callback function so that we can add
                 * recently freed extents to the free extent cache once
                 * the transaction that freed them is written to the
                 * journal on disk.
                 */
                if (hfsmp->jnl)
                    journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);

                hfs_unlock_global (hfsmp);

                if (hfsmp->jnl == NULL) {
                    if (HFS_MOUNT_DEBUG) {
                        printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp->vcbVN));
                    }
                    retval = EINVAL;
                    goto out;
                } else {
                    hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
                    vfs_setflags(hfsmp->hfs_mp, MNT_JOURNALED);
                }
            }

            /* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
            retval = hfs_erase_unused_nodes(hfsmp);
            if (retval != E_NONE) {
                if (HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval, hfsmp->vcbVN);
                }
                goto out;
            }

            /* If this mount point was downgraded from read-write
             * to read-only, clear that information as we are now
             * moving back to read-write.
             */
            hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
            hfsmp->hfs_downgrading_thread = NULL;

            /* mark the volume dirty (clear clean unmount bit) */
            hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;

            retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
            if (retval != E_NONE) {
                if (HFS_MOUNT_DEBUG) {
                    printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval, hfsmp->vcbVN);
                }
                goto out;
            }

            /* Only clear HFS_READ_ONLY after a successful write */
            hfsmp->hfs_flags &= ~HFS_READ_ONLY;


            if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
                /* Setup private/hidden directories for hardlinks. */
                hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
                hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

                hfs_remove_orphans(hfsmp);

                /*
                 * Allow hot file clustering if conditions allow.
                 */
                if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) &&
                    ((hfsmp->hfs_mp->mnt_kern_flag & MNTK_SSD) == 0)) {
                    (void) hfs_recording_init(hfsmp);
                }
                /* Force ACLs on HFS+ file systems. */
                if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
                    vfs_setextendedsecurity(HFSTOVFS(hfsmp));
                }
            }
        }

        /* Update file system parameters. */
        retval = hfs_changefs(mp, &args);
        if (retval && HFS_MOUNT_DEBUG) {
            printf("hfs_mount: hfs_changefs returned %d for %s\n", retval, hfsmp->vcbVN);
        }

    } else /* not an update request */ {

        /* Set the mount flag to indicate that we support volfs */
        vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));

        retval = hfs_mountfs(devvp, mp, &args, 0, context);
        if (retval) {
            const char *name = vnode_getname(devvp);
            printf("hfs_mount: hfs_mountfs returned error=%d for device %s\n", retval, (name ? name : "unknown-dev"));
            if (name) {
                vnode_putname(name);
            }
            goto out;
        }

        /* After hfs_mountfs succeeds, we should have valid hfsmp */
        hfsmp = VFSTOHFS(mp);

        /*
         * Check to see if the file system exists on CoreStorage.
         *
         * This must be done after examining the root folder's CP EA since
         * hfs_vfs_root will create a vnode (which must not occur until after
         * we've established the CP level of the FS).
         */
        if (retval == 0) {
            errno_t err;
            /* Invoke ioctl that asks if the underlying device is Core Storage or not */
            err = VNOP_IOCTL(devvp, _DKIOCCORESTORAGE, NULL, 0, context);
            if (err == 0) {
                hfsmp->hfs_flags |= HFS_CS;
            }
        }
    }

out:
    if (retval == 0) {
        (void)hfs_statfs(mp, vfs_statfs(mp), context);
    }
    return (retval);
}


struct hfs_changefs_cargs {
    struct hfsmount *hfsmp;
    int namefix;
    int permfix;
    int permswitch;
};

static int
hfs_changefs_callback(struct vnode *vp, void *cargs)
{
    ExtendedVCB *vcb;
    struct cnode *cp;
    struct cat_desc cndesc;
    struct cat_attr cnattr;
    struct hfs_changefs_cargs *args;
    int lockflags;
    int error;

    args = (struct hfs_changefs_cargs *)cargs;

    cp = VTOC(vp);
    vcb = HFSTOVCB(args->hfsmp);

    lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
    error = cat_lookup(args->hfsmp, &cp->c_desc, 0, 0, &cndesc, &cnattr, NULL, NULL);
    hfs_systemfile_unlock(args->hfsmp, lockflags);
    if (error) {
        /*
         * If we couldn't find this guy skip to the next one
         */
        if (args->namefix)
            cache_purge(vp);

        return (VNODE_RETURNED);
    }
    /*
     * Get the real uid/gid and perm mask from disk.
     */
    if (args->permswitch || args->permfix) {
        cp->c_uid = cnattr.ca_uid;
        cp->c_gid = cnattr.ca_gid;
        cp->c_mode = cnattr.ca_mode;
    }
    /*
     * If we're switching name converters then...
     * Remove the existing entry from the namei cache.
     * Update name to one based on new encoder.
     */
    if (args->namefix) {
        cache_purge(vp);
        replace_desc(cp, &cndesc);

        if (cndesc.cd_cnid == kHFSRootFolderID) {
            strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
            cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;
        }
    } else {
        cat_releasedesc(&cndesc);
    }
    return (VNODE_RETURNED);
}
/* Change fs mount parameters */
static int
hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
{
    int retval = 0;
    int namefix, permfix, permswitch;
    struct hfsmount *hfsmp;
    ExtendedVCB *vcb;
    struct hfs_changefs_cargs cargs;
    u_int32_t mount_flags;

#if CONFIG_HFS_STD
    u_int32_t old_encoding = 0;
    hfs_to_unicode_func_t get_unicode_func;
    unicode_to_hfs_func_t get_hfsname_func;
#endif

    hfsmp = VFSTOHFS(mp);
    vcb = HFSTOVCB(hfsmp);
    mount_flags = (unsigned int)vfs_flags(mp);

    hfsmp->hfs_flags |= HFS_IN_CHANGEFS;

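    /*
     * permswitch is true when the "unknown permissions" state is changing in
     * either direction: the volume is currently using unknown permissions
     * while MNT_UNKNOWNPERMISSIONS is being cleared, or it is currently using
     * real permissions while MNT_UNKNOWNPERMISSIONS is being requested.
     */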
    permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
                   ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
                  (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
                   (mount_flags & MNT_UNKNOWNPERMISSIONS)));

    /* The root filesystem must operate with actual permissions: */
    if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
        vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS)); /* Just say "No". */
        retval = EINVAL;
        goto exit;
    }
    if (mount_flags & MNT_UNKNOWNPERMISSIONS)
        hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
    else
        hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;

    namefix = permfix = 0;

    /*
     * Tracking of hot files requires up-to-date access times. So if
     * access time updates are disabled, we must also disable hot files.
     */
    if (mount_flags & MNT_NOATIME) {
        (void) hfs_recording_suspend(hfsmp);
    }

    /* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
    if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
        gTimeZone = args->hfs_timezone;
    }

    /* Change the default uid, gid and/or mask */
    if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
        hfsmp->hfs_uid = args->hfs_uid;
        if (vcb->vcbSigWord == kHFSPlusSigWord)
            ++permfix;
    }
    if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
        hfsmp->hfs_gid = args->hfs_gid;
        if (vcb->vcbSigWord == kHFSPlusSigWord)
            ++permfix;
    }
    if (args->hfs_mask != (mode_t)VNOVAL) {
        if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
            hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
            hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
            if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
                hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
            if (vcb->vcbSigWord == kHFSPlusSigWord)
                ++permfix;
        }
    }

#if CONFIG_HFS_STD
    /* Change the hfs encoding value (hfs only) */
    if ((vcb->vcbSigWord == kHFSSigWord) &&
        (args->hfs_encoding != (u_int32_t)VNOVAL) &&
        (hfsmp->hfs_encoding != args->hfs_encoding)) {

        retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);
        if (retval)
            goto exit;

        /*
         * Connect the new hfs_get_unicode converter but leave
         * the old hfs_get_hfsname converter in place so that
         * we can lookup existing vnodes to get their correctly
         * encoded names.
         *
         * When we're all finished, we can then connect the new
         * hfs_get_hfsname converter and release our interest
         * in the old converters.
         */
        hfsmp->hfs_get_unicode = get_unicode_func;
        old_encoding = hfsmp->hfs_encoding;
        hfsmp->hfs_encoding = args->hfs_encoding;
        ++namefix;
    }
#endif

    if (!(namefix || permfix || permswitch))
        goto exit;

    /* XXX 3762912 hack to support HFS filesystem 'owner' */
    if (permfix)
        vfs_setowner(mp,
            hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
            hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);

    /*
     * For each active vnode fix things that changed
     *
     * Note that we can visit a vnode more than once
     * and we can race with fsync.
     *
     * hfs_changefs_callback will be called for each vnode
     * hung off of this mount point
     *
     * The vnode will be properly referenced and unreferenced
     * around the callback
     */
    cargs.hfsmp = hfsmp;
    cargs.namefix = namefix;
    cargs.permfix = permfix;
    cargs.permswitch = permswitch;

    vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);

#if CONFIG_HFS_STD
    /*
     * If we're switching name converters we can now
     * connect the new hfs_get_hfsname converter and
     * release our interest in the old converters.
     */
    if (namefix) {
        /* HFS standard only */
        hfsmp->hfs_get_hfsname = get_hfsname_func;
        vcb->volumeNameEncodingHint = args->hfs_encoding;
        (void) hfs_relconverter(old_encoding);
    }
#endif

exit:
    hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
    return (retval);
}


struct hfs_reload_cargs {
    struct hfsmount *hfsmp;
    int error;
};

static int
hfs_reload_callback(struct vnode *vp, void *cargs)
{
    struct cnode *cp;
    struct hfs_reload_cargs *args;
    int lockflags;

    args = (struct hfs_reload_cargs *)cargs;
    /*
     * flush all the buffers associated with this node
     */
    (void) buf_invalidateblks(vp, 0, 0, 0);

    cp = VTOC(vp);
    /*
     * Remove any directory hints
     */
    if (vnode_isdir(vp))
        hfs_reldirhints(cp, 0);

    /*
     * Re-read cnode data for all active vnodes (non-metadata files).
     */
    if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp) && (cp->c_fileid >= kHFSFirstUserCatalogNodeID)) {
        struct cat_fork *datafork;
        struct cat_desc desc;

        datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;

        /* lookup by fileID since name could have changed */
        lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
        args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, 0, &desc, &cp->c_attr, datafork);
        hfs_systemfile_unlock(args->hfsmp, lockflags);
        if (args->error) {
            return (VNODE_RETURNED_DONE);
        }

        /* update cnode's catalog descriptor */
        (void) replace_desc(cp, &desc);
    }
    return (VNODE_RETURNED);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *   invalidate all cached meta-data.
 *   invalidate all inactive vnodes.
 *   invalidate all cached file data.
 *   re-read volume header from disk.
 *   re-load meta-file info (extents, file size).
 *   re-load B-tree header data.
 *   re-read cnode data for all active vnodes.
 */
int
hfs_reload(struct mount *mountp)
{
    register struct vnode *devvp;
    struct buf *bp;
    int error, i;
    struct hfsmount *hfsmp;
    struct HFSPlusVolumeHeader *vhp;
    ExtendedVCB *vcb;
    struct filefork *forkp;
    struct cat_desc cndesc;
    struct hfs_reload_cargs args;
    daddr64_t priIDSector;

    hfsmp = VFSTOHFS(mountp);
    vcb = HFSTOVCB(hfsmp);

    if (vcb->vcbSigWord == kHFSSigWord)
        return (EINVAL); /* rooting from HFS is not supported! */

    /*
     * Invalidate all cached meta-data.
     */
    devvp = hfsmp->hfs_devvp;
    if (buf_invalidateblks(devvp, 0, 0, 0))
        panic("hfs_reload: dirty1");

    args.hfsmp = hfsmp;
    args.error = 0;
    /*
     * hfs_reload_callback will be called for each vnode
     * hung off of this mount point that can't be recycled...
     * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
     * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
     * properly referenced and unreferenced around the callback
     */
    vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);

    if (args.error)
        return (args.error);

    /*
     * Re-read VolumeHeader from disk.
     */
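    /*
     * priIDSector is the logical block holding the primary volume header:
     * 1024 bytes past the start of the (possibly embedded) HFS+ volume,
     * which is why hfsPlusIOPosOffset is folded into the calculation before
     * HFS_PRI_SECTOR locates the header block.
     */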
    priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
            HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));

    error = (int)buf_meta_bread(hfsmp->hfs_devvp,
            HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
            hfsmp->hfs_physical_block_size, NOCRED, &bp);
    if (error) {
        if (bp != NULL)
            buf_brelse(bp);
        return (error);
    }

    vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));

    /* Do a quick sanity check */
    if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
         SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
        (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
         SWAP_BE16(vhp->version) != kHFSXVersion) ||
        SWAP_BE32(vhp->blockSize) != vcb->blockSize) {
        buf_brelse(bp);
        return (EIO);
    }

    vcb->vcbLsMod = to_bsd_time(SWAP_BE32(vhp->modifyDate));
    vcb->vcbAtrb = SWAP_BE32 (vhp->attributes);
    vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock);
    vcb->vcbClpSiz = SWAP_BE32 (vhp->rsrcClumpSize);
    vcb->vcbNxtCNID = SWAP_BE32 (vhp->nextCatalogID);
    vcb->vcbVolBkUp = to_bsd_time(SWAP_BE32(vhp->backupDate));
    vcb->vcbWrCnt = SWAP_BE32 (vhp->writeCount);
    vcb->vcbFilCnt = SWAP_BE32 (vhp->fileCount);
    vcb->vcbDirCnt = SWAP_BE32 (vhp->folderCount);
    HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
    vcb->totalBlocks = SWAP_BE32 (vhp->totalBlocks);
    vcb->freeBlocks = SWAP_BE32 (vhp->freeBlocks);
    vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap);
    bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
    vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */

    /*
     * Re-load meta-file vnode data (extent info, file size, etc).
     */
    forkp = VTOF((struct vnode *)vcb->extentsRefNum);
    for (i = 0; i < kHFSPlusExtentDensity; i++) {
        forkp->ff_extents[i].startBlock =
            SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
        forkp->ff_extents[i].blockCount =
            SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
    }
    forkp->ff_size = SWAP_BE64 (vhp->extentsFile.logicalSize);
    forkp->ff_blocks = SWAP_BE32 (vhp->extentsFile.totalBlocks);
    forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);


    forkp = VTOF((struct vnode *)vcb->catalogRefNum);
    for (i = 0; i < kHFSPlusExtentDensity; i++) {
        forkp->ff_extents[i].startBlock =
            SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
        forkp->ff_extents[i].blockCount =
            SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
    }
    forkp->ff_size = SWAP_BE64 (vhp->catalogFile.logicalSize);
    forkp->ff_blocks = SWAP_BE32 (vhp->catalogFile.totalBlocks);
    forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);

    if (hfsmp->hfs_attribute_vp) {
        forkp = VTOF(hfsmp->hfs_attribute_vp);
        for (i = 0; i < kHFSPlusExtentDensity; i++) {
            forkp->ff_extents[i].startBlock =
                SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
            forkp->ff_extents[i].blockCount =
                SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
        }
        forkp->ff_size = SWAP_BE64 (vhp->attributesFile.logicalSize);
        forkp->ff_blocks = SWAP_BE32 (vhp->attributesFile.totalBlocks);
        forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);
    }

    forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
    for (i = 0; i < kHFSPlusExtentDensity; i++) {
        forkp->ff_extents[i].startBlock =
            SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
        forkp->ff_extents[i].blockCount =
            SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
    }
    forkp->ff_size = SWAP_BE64 (vhp->allocationFile.logicalSize);
    forkp->ff_blocks = SWAP_BE32 (vhp->allocationFile.totalBlocks);
    forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);

    buf_brelse(bp);
    vhp = NULL;

    /*
     * Re-load B-tree header data
     */
    forkp = VTOF((struct vnode *)vcb->extentsRefNum);
    if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
        return (error);

    forkp = VTOF((struct vnode *)vcb->catalogRefNum);
    if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
        return (error);

    if (hfsmp->hfs_attribute_vp) {
        forkp = VTOF(hfsmp->hfs_attribute_vp);
        if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
            return (error);
    }

    /* Reload the volume name */
    if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, NULL, NULL)))
        return (error);
    vcb->volumeNameEncodingHint = cndesc.cd_encoding;
    bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
    cat_releasedesc(&cndesc);

    /* Re-establish private/hidden directories. */
    hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
    hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

    /* In case any volume information changed to trigger a notification */
    hfs_generate_volume_notifications(hfsmp);

    return (0);
}

__unused
static uint64_t tv_to_usecs(struct timeval *tv)
{
    return tv->tv_sec * 1000000ULL + tv->tv_usec;
}

// Returns TRUE if b - a >= usecs
static boolean_t hfs_has_elapsed (const struct timeval *a,
                                  const struct timeval *b,
                                  uint64_t usecs)
{
    struct timeval diff;
    timersub(b, a, &diff);
    return diff.tv_sec * 1000000ULL + diff.tv_usec >= usecs;
}

static void
hfs_syncer(void *arg0, void *unused)
{
#pragma unused(unused)

    struct hfsmount *hfsmp = arg0;
    struct timeval now;

    microuptime(&now);

    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_START, hfsmp,
                          tv_to_usecs(&now),
                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
                          hfsmp->hfs_mp->mnt_pending_write_size, 0);

    hfs_syncer_lock(hfsmp);

    if (!hfsmp->hfs_syncer) {
        // hfs_unmount is waiting for us; leave now and let it do the sync
        hfsmp->hfs_sync_incomplete = FALSE;
        hfs_syncer_unlock(hfsmp);
        hfs_syncer_wakeup(hfsmp);
        return;
    }

    /* Check to see whether we should flush now: either the oldest is
       > HFS_MAX_META_DELAY or HFS_META_DELAY has elapsed since the
       request and there are no pending writes. */

    boolean_t flush_now = FALSE;

    if (hfs_has_elapsed(&hfsmp->hfs_sync_req_oldest, &now, HFS_MAX_META_DELAY))
        flush_now = TRUE;
    else if (!hfsmp->hfs_mp->mnt_pending_write_size) {
        /* N.B. accessing mnt_last_write_completed_timestamp is not thread safe, but
           it won't matter for what we're using it for. */
        if (hfs_has_elapsed(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp,
                            &now,
                            HFS_META_DELAY)) {
            flush_now = TRUE;
        }
    }

    if (!flush_now) {
        thread_call_t syncer = hfsmp->hfs_syncer;

        hfs_syncer_unlock(hfsmp);

        hfs_syncer_queue(syncer);

        return;
    }

    timerclear(&hfsmp->hfs_sync_req_oldest);

    hfs_syncer_unlock(hfsmp);

    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_START,
                          tv_to_usecs(&now),
                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp),
                          hfsmp->hfs_mp->mnt_pending_write_size, 0);

    if (hfsmp->hfs_syncer_thread) {
        printf("hfs: syncer already running!\n");
        return;
    }

    hfsmp->hfs_syncer_thread = current_thread();

    if (hfs_start_transaction(hfsmp) != 0) // so we hold off any new writes
        goto out;

    /*
     * We intentionally do a synchronous flush (of the journal or entire volume) here.
     * For journaled volumes, this means we wait until the metadata blocks are written
     * to both the journal and their final locations (in the B-trees, etc.).
     *
     * This tends to avoid interleaving the metadata writes with other writes (for
     * example, user data, or to the journal when a later transaction notices that
     * an earlier transaction has finished its async writes, and then updates the
     * journal start in the journal header). Avoiding interleaving of writes is
     * very good for performance on simple flash devices like SD cards, thumb drives;
     * and on devices like floppies. Since removable devices tend to be this kind of
     * simple device, doing a synchronous flush actually improves performance in
     * practice.
     *
     * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
     * user data to be written.
     */
    if (hfsmp->jnl) {
        hfs_journal_flush(hfsmp, TRUE);
    } else {
        hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel());
    }

    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_END,
                          (microuptime(&now), tv_to_usecs(&now)),
                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp),
                          hfsmp->hfs_mp->mnt_pending_write_size, 0);

    hfs_end_transaction(hfsmp);

out:

    hfsmp->hfs_syncer_thread = NULL;

    hfs_syncer_lock(hfsmp);

    // If hfs_unmount lets us and we missed a sync, schedule again
    if (hfsmp->hfs_syncer && timerisset(&hfsmp->hfs_sync_req_oldest)) {
        thread_call_t syncer = hfsmp->hfs_syncer;

        hfs_syncer_unlock(hfsmp);

        hfs_syncer_queue(syncer);
    } else {
        hfsmp->hfs_sync_incomplete = FALSE;
        hfs_syncer_unlock(hfsmp);
        hfs_syncer_wakeup(hfsmp);
    }

    /* BE CAREFUL WHAT YOU ADD HERE: at this point hfs_unmount is free
       to continue and therefore hfsmp might be invalid. */

    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_END, 0, 0, 0, 0, 0);
}


extern int IOBSDIsMediaEjectable( const char *cdev_name );

/*
 * Call into the allocator code and perform a full scan of the bitmap file.
 *
 * This allows us to TRIM unallocated ranges if needed, and also to build up
 * an in-memory summary table of the state of the allocated blocks.
 */
void hfs_scan_blocks (struct hfsmount *hfsmp) {
    /*
     * Take the allocation file lock. Journal transactions will block until
     * we're done here.
     */

    int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

    /*
     * We serialize here with the HFS mount lock as we're mounting.
     *
     * The mount can only proceed once this thread has acquired the bitmap
     * lock, since we absolutely do not want someone else racing in and
     * getting the bitmap lock, doing a read/write of the bitmap file,
     * then us getting the bitmap lock.
     *
     * To prevent this, the mount thread takes the HFS mount mutex, starts us
     * up, then immediately msleeps on the scan_var variable in the mount
     * point as a condition variable. This serialization is safe since
     * if we race in and try to proceed while they're still holding the lock,
     * we'll block trying to acquire the global lock. Since the mount thread
     * acquires the HFS mutex before starting this function in a new thread,
     * any lock acquisition on our part must be linearizably AFTER the mount thread's.
     *
     * Note that the HFS mount mutex is always taken last, and always for only
     * a short time. In this case, we just take it long enough to mark the
     * scan-in-flight bit.
     */
    (void) hfs_lock_mount (hfsmp);
    hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_INFLIGHT;
    wakeup((caddr_t) &hfsmp->scan_var);
    hfs_unlock_mount (hfsmp);

    /* Initialize the summary table */
    if (hfs_init_summary (hfsmp)) {
        printf("hfs: could not initialize summary table for %s\n", hfsmp->vcbVN);
    }

    /*
     * ScanUnmapBlocks assumes that the bitmap lock is held when you
     * call the function. We don't care if there were any errors issuing unmaps.
     *
     * It will also attempt to build up the summary table for subsequent
     * allocator use, as configured.
     */
    (void) ScanUnmapBlocks(hfsmp);

    hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_COMPLETED;

    hfs_systemfile_unlock(hfsmp, flags);
}

static int hfs_root_unmounted_cleanly = 0;

SYSCTL_DECL(_vfs_generic);
SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &hfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");

/*
 * Common code for mount and mountroot
 */
int
hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
            int journal_replay_only, vfs_context_t context)
{
    struct proc *p = vfs_context_proc(context);
    int retval = E_NONE;
    struct hfsmount *hfsmp = NULL;
    struct buf *bp;
    dev_t dev;
    HFSMasterDirectoryBlock *mdbp = NULL;
    int ronly;
#if QUOTA
    int i;
#endif
    int mntwrapper;
    kauth_cred_t cred;
    u_int64_t disksize;
    daddr64_t log_blkcnt;
    u_int32_t log_blksize;
    u_int32_t phys_blksize;
    u_int32_t minblksize;
    u_int32_t iswritable;
    daddr64_t mdb_offset;
    int isvirtual = 0;
    int isroot = 0;
    u_int32_t device_features = 0;
    int isssd;

    if (args == NULL) {
        /* only hfs_mountroot passes us NULL as the 'args' argument */
        isroot = 1;
    }

    ronly = vfs_isrdonly(mp);
    dev = vnode_specrdev(devvp);
    cred = p ? vfs_context_ucred(context) : NOCRED;
    mntwrapper = 0;

    bp = NULL;
    hfsmp = NULL;
    mdbp = NULL;
    minblksize = kHFSBlockSize;

    /* Advisory locking should be handled at the VFS layer */
    vfs_setlocklocal(mp);

    /* Get the logical block size (treated as physical block size everywhere) */
    if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
        if (HFS_MOUNT_DEBUG) {
            printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n");
        }
        retval = ENXIO;
        goto error_exit;
    }
    if (log_blksize == 0 || log_blksize > 1024*1024*1024) {
        printf("hfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize);
        retval = ENXIO;
        goto error_exit;
    }

    /* Get the physical block size. */
    retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
    if (retval) {
        if ((retval != ENOTSUP) && (retval != ENOTTY)) {
            if (HFS_MOUNT_DEBUG) {
                printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n");
            }
            retval = ENXIO;
            goto error_exit;
        }
        /* If device does not support this ioctl, assume that physical
         * block size is same as logical block size
         */
        phys_blksize = log_blksize;
    }
    if (phys_blksize == 0 || phys_blksize > MAXBSIZE) {
        printf("hfs: physical block size 0x%x looks bad. Not mounting.\n", phys_blksize);
        retval = ENXIO;
        goto error_exit;
    }

    /* Switch to 512 byte sectors (temporarily) */
    if (log_blksize > 512) {
        u_int32_t size512 = 512;

        if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
            if (HFS_MOUNT_DEBUG) {
                printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n");
            }
            retval = ENXIO;
            goto error_exit;
        }
    }
    /* Get the number of 512 byte physical blocks. */
    if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
        /* resetting block size may fail if getting block count did */
        (void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
        if (HFS_MOUNT_DEBUG) {
            printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n");
        }
        retval = ENXIO;
        goto error_exit;
    }
    /* Compute an accurate disk size (i.e. within 512 bytes) */
    disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;

    /*
     * On Tiger it is not necessary to switch the device
     * block size to be 4k if there are more than 31-bits
     * worth of blocks but to ensure compatibility with
     * pre-Tiger systems we have to do it.
     *
     * If the device size is not a multiple of 4K (8 * 512), then
     * switching the logical block size isn't going to help because
     * we will be unable to write the alternate volume header.
     * In this case, just leave the logical block size unchanged.
     */
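    /*
     * For scale: with 512-byte sectors, a block count above 0x7fffffff means
     * the device is larger than 1 TiB (2^31 * 512 bytes), and the
     * (log_blkcnt & 7) == 0 test below checks that the 512-byte block count
     * is a multiple of 8, i.e. that the device size is a multiple of 4 KiB.
     */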
    if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
        minblksize = log_blksize = 4096;
        if (phys_blksize < log_blksize)
            phys_blksize = log_blksize;
    }
1330 | ||
1331 | /* | |
1332 | * The cluster layer is not currently prepared to deal with a logical | |
1333 | * block size larger than the system's page size. (It can handle | |
1334 | * blocks per page, but not multiple pages per block.) So limit the | |
1335 | * logical block size to the page size. | |
1336 | */ | |
1337 | if (log_blksize > PAGE_SIZE) { | |
1338 | log_blksize = PAGE_SIZE; | |
1339 | } | |
1340 | ||
1341 | /* Now switch to our preferred physical block size. */ | |
1342 | if (log_blksize > 512) { | |
1343 | if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) { | |
1344 | if (HFS_MOUNT_DEBUG) { | |
1345 | printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n"); | |
1346 | } | |
1347 | retval = ENXIO; | |
1348 | goto error_exit; | |
1349 | } | |
1350 | /* Get the count of physical blocks. */ | |
1351 | if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) { | |
1352 | if (HFS_MOUNT_DEBUG) { | |
1353 | printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n"); | |
1354 | } | |
1355 | retval = ENXIO; | |
1356 | goto error_exit; | |
1357 | } | |
1358 | } | |
1359 | /* | |
1360 | * At this point: | |
1361 | * minblksize is the minimum physical block size | |
1362 | * log_blksize has our preferred physical block size | |
1363 | * log_blkcnt has the total number of physical blocks | |
1364 | */ | |
1365 | ||
1366 | mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize); | |
1367 | if ((retval = (int)buf_meta_bread(devvp, | |
1368 | HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)), | |
1369 | phys_blksize, cred, &bp))) { | |
1370 | if (HFS_MOUNT_DEBUG) { | |
1371 | printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval); | |
1372 | } | |
1373 | goto error_exit; | |
1374 | } | |
1375 | MALLOC(mdbp, HFSMasterDirectoryBlock *, kMDBSize, M_TEMP, M_WAITOK); | |
1376 | if (mdbp == NULL) { | |
1377 | retval = ENOMEM; | |
1378 | if (HFS_MOUNT_DEBUG) { | |
1379 | printf("hfs_mountfs: MALLOC failed\n"); | |
1380 | } | |
1381 | goto error_exit; | |
1382 | } | |
1383 | bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize); | |
1384 | buf_brelse(bp); | |
1385 | bp = NULL; | |
1386 | ||
1387 | MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK); | |
1388 | if (hfsmp == NULL) { | |
1389 | if (HFS_MOUNT_DEBUG) { | |
1390 | printf("hfs_mountfs: MALLOC (2) failed\n"); | |
1391 | } | |
1392 | retval = ENOMEM; | |
1393 | goto error_exit; | |
1394 | } | |
1395 | bzero(hfsmp, sizeof(struct hfsmount)); | |
1396 | ||
1397 | hfs_chashinit_finish(hfsmp); | |
1398 | ||
1399 | /* Init the ID lookup hashtable */ | |
1400 | hfs_idhash_init (hfsmp); | |
1401 | ||
1402 | /* | |
1403 | * See if the disk supports unmap (trim). | |
1404 | * | |
1405 | * NOTE: vfs_init_io_attributes has not been called yet, so we can't use the io_flags field | |
1406 | * returned by vfs_ioattr. We need to call VNOP_IOCTL ourselves. | |
1407 | */ | |
1408 | if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&device_features, 0, context) == 0) { | |
1409 | if (device_features & DK_FEATURE_UNMAP) { | |
1410 | hfsmp->hfs_flags |= HFS_UNMAP; | |
1411 | } | |
1412 | } | |
1413 | ||
1414 | /* | |
1415 | * See if the disk is a solid state device, too. We need this to decide what to do about | |
1416 | * hotfiles. | |
1417 | */ | |
1418 | if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, context) == 0) { | |
1419 | if (isssd) { | |
1420 | hfsmp->hfs_flags |= HFS_SSD; | |
1421 | } | |
1422 | } | |
1423 | ||
1424 | ||
1425 | /* | |
1426 | * Init the volume information structure | |
1427 | */ | |
1428 | ||
1429 | lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr); | |
1430 | lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr); | |
1431 | lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr); | |
1432 | lck_spin_init(&hfsmp->vcbFreeExtLock, hfs_spinlock_group, hfs_lock_attr); | |
1433 | ||
1434 | vfs_setfsprivate(mp, hfsmp); | |
1435 | hfsmp->hfs_mp = mp; /* Make VFSTOHFS work */ | |
1436 | hfsmp->hfs_raw_dev = vnode_specrdev(devvp); | |
1437 | hfsmp->hfs_devvp = devvp; | |
1438 | vnode_ref(devvp); /* Hold a ref on the device, dropped when hfsmp is freed. */ | |
1439 | hfsmp->hfs_logical_block_size = log_blksize; | |
1440 | hfsmp->hfs_logical_block_count = log_blkcnt; | |
1441 | hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt; | |
1442 | hfsmp->hfs_physical_block_size = phys_blksize; | |
1443 | hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize); | |
1444 | hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA; | |
1445 | if (ronly) | |
1446 | hfsmp->hfs_flags |= HFS_READ_ONLY; | |
1447 | if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) | |
1448 | hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS; | |
1449 | ||
1450 | #if QUOTA | |
1451 | for (i = 0; i < MAXQUOTAS; i++) | |
1452 | dqfileinit(&hfsmp->hfs_qfiles[i]); | |
1453 | #endif | |
1454 | ||
1455 | if (args) { | |
1456 | hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid; | |
1457 | if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID; | |
1458 | hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? UNKNOWNGID : args->hfs_gid; | |
1459 | if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID; | |
1460 | vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */ | |
1461 | if (args->hfs_mask != (mode_t)VNOVAL) { | |
1462 | hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS; | |
1463 | if (args->flags & HFSFSMNT_NOXONFILES) { | |
1464 | hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE); | |
1465 | } else { | |
1466 | hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS; | |
1467 | } | |
1468 | } else { | |
1469 | hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */ | |
1470 | hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */ | |
1471 | } | |
1472 | if ((args->flags != (int)VNOVAL) && (args->flags & HFSFSMNT_WRAPPER)) | |
1473 | mntwrapper = 1; | |
1474 | } else { | |
1475 | /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */ | |
1476 | if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) { | |
1477 | hfsmp->hfs_uid = UNKNOWNUID; | |
1478 | hfsmp->hfs_gid = UNKNOWNGID; | |
1479 | vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */ | |
1480 | hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */ | |
1481 | hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */ | |
1482 | } | |
1483 | } | |
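| /* | |
| * Worked example (illustrative only, not part of the original source): | |
| * with a hypothetical mount argument of hfs_mask = 0755 and the | |
| * HFSFSMNT_NOXONFILES flag set, the assignments above reduce to | |
| * | |
| *     hfs_dir_mask  = 0755 & ALLPERMS;      i.e. 0755 | |
| *     hfs_file_mask = 0755 & DEFFILEMODE;   i.e. 0755 & 0666 == 0644 | |
| * | |
| * so directories keep their execute (search) bits while plain files have | |
| * them stripped. | |
| */ | |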
1484 | ||
1485 | /* Find out if disk media is writable. */ | |
1486 | if (VNOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, context) == 0) { | |
1487 | if (iswritable) | |
1488 | hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA; | |
1489 | else | |
1490 | hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA; | |
1491 | } | |
1492 | ||
1493 | // record the current time at which we're mounting this volume | |
1494 | struct timeval tv; | |
1495 | microtime(&tv); | |
1496 | hfsmp->hfs_mount_time = tv.tv_sec; | |
1497 | ||
1498 | /* Mount a standard HFS disk */ | |
1499 | if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) && | |
1500 | (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) { | |
1501 | #if CONFIG_HFS_STD | |
1502 | /* On 10.6 and beyond, non-read-only mounts of HFS standard volumes get rejected */ | |
1503 | if (vfs_isrdwr(mp)) { | |
1504 | retval = EROFS; | |
1505 | goto error_exit; | |
1506 | } | |
1507 | ||
1508 | printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n"); | |
1509 | ||
1510 | /* Treat it as if it's read-only and not writeable */ | |
1511 | hfsmp->hfs_flags |= HFS_READ_ONLY; | |
1512 | hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA; | |
1513 | ||
1514 | /* If only journal replay is requested, exit immediately */ | |
1515 | if (journal_replay_only) { | |
1516 | retval = 0; | |
1517 | goto error_exit; | |
1518 | } | |
1519 | ||
1520 | if ((vfs_flags(mp) & MNT_ROOTFS)) { | |
1521 | retval = EINVAL; /* Cannot root from HFS standard disks */ | |
1522 | goto error_exit; | |
1523 | } | |
1524 | /* HFS disks can only use 512 byte physical blocks */ | |
1525 | if (log_blksize > kHFSBlockSize) { | |
1526 | log_blksize = kHFSBlockSize; | |
1527 | if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) { | |
1528 | retval = ENXIO; | |
1529 | goto error_exit; | |
1530 | } | |
1531 | if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) { | |
1532 | retval = ENXIO; | |
1533 | goto error_exit; | |
1534 | } | |
1535 | hfsmp->hfs_logical_block_size = log_blksize; | |
1536 | hfsmp->hfs_logical_block_count = log_blkcnt; | |
1537 | hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt; | |
1538 | hfsmp->hfs_physical_block_size = log_blksize; | |
1539 | hfsmp->hfs_log_per_phys = 1; | |
1540 | } | |
1541 | if (args) { | |
1542 | hfsmp->hfs_encoding = args->hfs_encoding; | |
1543 | HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding; | |
1544 | ||
1545 | /* establish the timezone */ | |
1546 | gTimeZone = args->hfs_timezone; | |
1547 | } | |
1548 | ||
1549 | retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode, | |
1550 | &hfsmp->hfs_get_hfsname); | |
1551 | if (retval) | |
1552 | goto error_exit; | |
1553 | ||
1554 | retval = hfs_MountHFSVolume(hfsmp, mdbp, p); | |
1555 | if (retval) | |
1556 | (void) hfs_relconverter(hfsmp->hfs_encoding); | |
1557 | #else | |
1558 | /* On platforms where HFS Standard is not supported, deny the mount altogether */ | |
1559 | retval = EINVAL; | |
1560 | goto error_exit; | |
1561 | #endif | |
1562 | ||
1563 | } | |
1564 | else { /* Mount an HFS Plus disk */ | |
1565 | HFSPlusVolumeHeader *vhp; | |
1566 | off_t embeddedOffset; | |
1567 | int jnl_disable = 0; | |
1568 | ||
1569 | /* Get the embedded Volume Header */ | |
1570 | if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) { | |
1571 | embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize; | |
1572 | embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) * | |
1573 | (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz); | |
1574 | ||
1575 | /* | |
1576 | * If the embedded volume doesn't start on a block | |
1577 | * boundary, then switch the device to a 512-byte | |
1578 | * block size so everything will line up on a block | |
1579 | * boundary. | |
1580 | */ | |
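| /* | |
| * Worked example (illustrative, with assumed numbers): if drAlBlSt = 28, | |
| * drEmbedExtent.startBlock = 10 and drAlBlkSiz = 4096, then | |
| * | |
| *     embeddedOffset = 28 * 512 + 10 * 4096 = 14336 + 40960 = 55296 | |
| * | |
| * which is a multiple of 512 but not of a 4096-byte device block | |
| * (55296 % 4096 == 2048), so the code below drops the device to 512-byte | |
| * logical blocks to keep the wrapped HFS Plus volume aligned. | |
| */ | |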
1581 | if ((embeddedOffset % log_blksize) != 0) { | |
1582 | printf("hfs_mountfs: embedded volume offset not" | |
1583 | " a multiple of physical block size (%d);" | |
1584 | " switching to 512\n", log_blksize); | |
1585 | log_blksize = 512; | |
1586 | if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, | |
1587 | (caddr_t)&log_blksize, FWRITE, context)) { | |
1588 | ||
1589 | if (HFS_MOUNT_DEBUG) { | |
1590 | printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n"); | |
1591 | } | |
1592 | retval = ENXIO; | |
1593 | goto error_exit; | |
1594 | } | |
1595 | if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, | |
1596 | (caddr_t)&log_blkcnt, 0, context)) { | |
1597 | if (HFS_MOUNT_DEBUG) { | |
1598 | printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n"); | |
1599 | } | |
1600 | retval = ENXIO; | |
1601 | goto error_exit; | |
1602 | } | |
1603 | /* Note: relative block count adjustment */ | |
1604 | hfsmp->hfs_logical_block_count *= | |
1605 | hfsmp->hfs_logical_block_size / log_blksize; | |
1606 | ||
1607 | /* Update logical/physical block sizes */ | |
1608 | hfsmp->hfs_logical_block_size = log_blksize; | |
1609 | hfsmp->hfs_physical_block_size = log_blksize; | |
1610 | ||
1611 | phys_blksize = log_blksize; | |
1612 | hfsmp->hfs_log_per_phys = 1; | |
1613 | } | |
1614 | ||
1615 | disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) * | |
1616 | (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz); | |
1617 | ||
1618 | hfsmp->hfs_logical_block_count = disksize / log_blksize; | |
1619 | ||
1620 | hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size; | |
1621 | ||
1622 | mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); | |
1623 | ||
1624 | if (bp) { | |
1625 | buf_markinvalid(bp); | |
1626 | buf_brelse(bp); | |
1627 | bp = NULL; | |
1628 | } | |
1629 | retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), | |
1630 | phys_blksize, cred, &bp); | |
1631 | if (retval) { | |
1632 | if (HFS_MOUNT_DEBUG) { | |
1633 | printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval); | |
1634 | } | |
1635 | goto error_exit; | |
1636 | } | |
1637 | bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512); | |
1638 | buf_brelse(bp); | |
1639 | bp = NULL; | |
1640 | vhp = (HFSPlusVolumeHeader*) mdbp; | |
1641 | ||
1642 | } | |
1643 | else { /* pure HFS+ */ | |
1644 | embeddedOffset = 0; | |
1645 | vhp = (HFSPlusVolumeHeader*) mdbp; | |
1646 | } | |
1647 | ||
1648 | retval = hfs_ValidateHFSPlusVolumeHeader(hfsmp, vhp); | |
1649 | if (retval) | |
1650 | goto error_exit; | |
1651 | ||
1652 | /* | |
1653 | * If allocation block size is less than the physical block size, | |
1654 | * invalidate the buffer read in using native physical block size | |
1655 | * to ensure data consistency. | |
1656 | * | |
1657 | * HFS Plus reserves one allocation block for the Volume Header. | |
1658 | * If the physical size is larger, then when we read the volume header, | |
1659 | * we will also end up reading in the next allocation block(s). | |
1660 | * If those other allocation blocks are modified, and then the volume | |
1661 | * header is modified, the write of the volume header's buffer will write | |
1662 | * out the old contents of the other allocation blocks. | |
1663 | * | |
1664 | * We assume that the physical block size is the same as the logical block size. | |
1665 | * The physical block size value is used to round down the offsets for | |
1666 | * reading and writing the primary and alternate volume headers. | |
1667 | * | |
1668 | * The same logic is also in hfs_MountHFSPlusVolume to ensure that | |
1669 | * hfs_mountfs, hfs_MountHFSPlusVolume and later are doing the I/Os | |
1670 | * using the same block size. | |
1671 | */ | |
1672 | if (SWAP_BE32(vhp->blockSize) < hfsmp->hfs_physical_block_size) { | |
1673 | phys_blksize = hfsmp->hfs_logical_block_size; | |
1674 | hfsmp->hfs_physical_block_size = hfsmp->hfs_logical_block_size; | |
1675 | hfsmp->hfs_log_per_phys = 1; | |
1676 | // There should be one bp associated with devvp in buffer cache. | |
1677 | retval = buf_invalidateblks(devvp, 0, 0, 0); | |
1678 | if (retval) | |
1679 | goto error_exit; | |
1680 | } | |
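| /* | |
| * Illustrative scenario (assumed numbers, not from the original source): | |
| * with a 2048-byte allocation block size on media reporting 4096-byte | |
| * physical blocks, a 4 KiB read of the volume header also drags the | |
| * neighboring 2 KiB allocation block into the same buffer.  If that block | |
| * is later modified through its own buffer and the volume header buffer is | |
| * then written, the stale copy would clobber it.  Dropping | |
| * hfs_physical_block_size back to the logical size and invalidating the | |
| * cached blocks above avoids that aliasing. | |
| */ | |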
1681 | ||
1682 | if (isroot) { | |
1683 | hfs_root_unmounted_cleanly = ((SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) != 0); | |
1684 | } | |
1685 | ||
1686 | /* | |
1687 | * On inconsistent disks, do not allow read-write mount | |
1688 | * unless it is the boot volume being mounted. We also | |
1689 | * always want to replay the journal if the journal_replay_only | |
1690 | * flag is set because that will (most likely) get the | |
1691 | * disk into a consistent state before fsck_hfs starts | |
1692 | * looking at it. | |
1693 | */ | |
1694 | if ( !(vfs_flags(mp) & MNT_ROOTFS) | |
1695 | && (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask) | |
1696 | && !journal_replay_only | |
1697 | && !(hfsmp->hfs_flags & HFS_READ_ONLY)) { | |
1698 | ||
1699 | if (HFS_MOUNT_DEBUG) { | |
1700 | printf("hfs_mountfs: failed to mount non-root inconsistent disk\n"); | |
1701 | } | |
1702 | retval = EINVAL; | |
1703 | goto error_exit; | |
1704 | } | |
1705 | ||
1706 | ||
1707 | // XXXdbg | |
1708 | // | |
1709 | hfsmp->jnl = NULL; | |
1710 | hfsmp->jvp = NULL; | |
1711 | if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) && | |
1712 | args->journal_disable) { | |
1713 | jnl_disable = 1; | |
1714 | } | |
1715 | ||
1716 | // | |
1717 | // We only initialize the journal here if the last person | |
1718 | // to mount this volume was journaling aware. Otherwise | |
1719 | // we delay journal initialization until later at the end | |
1720 | // of hfs_MountHFSPlusVolume() because the last person who | |
1721 | // mounted it could have messed things up behind our back | |
1722 | // (so we need to go find the .journal file, make sure it's | |
1723 | // the right size, re-sync up if it was moved, etc). | |
1724 | // | |
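| // Illustrative note (not part of the original source): lastMountedVersion | |
| // is a four-character code.  A volume last touched by a journaling-aware | |
| // implementation carries kHFSJMountVersion ('HFSJ'), and the recovery path | |
| // below stamps kFSKMountVersion ('FSK!') so fsck_hfs performs a full check | |
| // instead of trusting the journal.  The exact numeric values live in the | |
| // HFS format headers; the character spellings here are for illustration. | |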
1725 | if ( (SWAP_BE32(vhp->lastMountedVersion) == kHFSJMountVersion) | |
1726 | && (SWAP_BE32(vhp->attributes) & kHFSVolumeJournaledMask) | |
1727 | && !jnl_disable) { | |
1728 | ||
1729 | // if we're able to init the journal, mark the mount | |
1730 | // point as journaled. | |
1731 | // | |
1732 | if ((retval = hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred)) == 0) { | |
1733 | vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED)); | |
1734 | } else { | |
1735 | if (retval == EROFS) { | |
1736 | // EROFS is a special error code that means the volume has an external | |
1737 | // journal which we couldn't find. In that case we do not want to | |
1738 | // rewrite the volume header - we'll just refuse to mount the volume. | |
1739 | if (HFS_MOUNT_DEBUG) { | |
1740 | printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n"); | |
1741 | } | |
1742 | retval = EINVAL; | |
1743 | goto error_exit; | |
1744 | } | |
1745 | ||
1746 | // if the journal failed to open, then set the lastMountedVersion | |
1747 | // to be "FSK!" which fsck_hfs will see and force the fsck instead | |
1748 | // of just bailing out because the volume is journaled. | |
1749 | if (!ronly) { | |
1750 | if (HFS_MOUNT_DEBUG) { | |
1751 | printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n"); | |
1752 | } | |
1753 | ||
1754 | HFSPlusVolumeHeader *jvhp; | |
1755 | ||
1756 | hfsmp->hfs_flags |= HFS_NEED_JNL_RESET; | |
1757 | ||
1758 | if (mdb_offset == 0) { | |
1759 | mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); | |
1760 | } | |
1761 | ||
1762 | bp = NULL; | |
1763 | retval = (int)buf_meta_bread(devvp, | |
1764 | HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), | |
1765 | phys_blksize, cred, &bp); | |
1766 | if (retval == 0) { | |
1767 | jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize)); | |
1768 | ||
1769 | if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) { | |
1770 | printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n"); | |
1771 | jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion); | |
1772 | buf_bwrite(bp); | |
1773 | } else { | |
1774 | buf_brelse(bp); | |
1775 | } | |
1776 | bp = NULL; | |
1777 | } else if (bp) { | |
1778 | buf_brelse(bp); | |
1779 | // clear this so the error exit path won't try to use it | |
1780 | bp = NULL; | |
1781 | } | |
1782 | } | |
1783 | ||
1784 | // if this isn't the root device just bail out. | |
1785 | // If it is the root device we just continue on | |
1786 | // in the hopes that fsck_hfs will be able to | |
1787 | // fix any damage that exists on the volume. | |
1788 | if ( !(vfs_flags(mp) & MNT_ROOTFS)) { | |
1789 | if (HFS_MOUNT_DEBUG) { | |
1790 | printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n"); | |
1791 | } | |
1792 | retval = EINVAL; | |
1793 | goto error_exit; | |
1794 | } | |
1795 | } | |
1796 | } | |
1797 | // XXXdbg | |
1798 | ||
1799 | /* Either the journal is replayed successfully, or there | |
1800 | * was nothing to replay, or no journal exists. In any case, | |
1801 | * return success. | |
1802 | */ | |
1803 | if (journal_replay_only) { | |
1804 | retval = 0; | |
1805 | goto error_exit; | |
1806 | } | |
1807 | ||
1808 | (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname); | |
1809 | ||
1810 | retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred); | |
1811 | /* | |
1812 | * If the backend didn't like our physical blocksize | |
1813 | * then retry with physical blocksize of 512. | |
1814 | */ | |
1815 | if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) { | |
1816 | printf("hfs_mountfs: could not use physical block size " | |
1817 | "(%d) switching to 512\n", log_blksize); | |
1818 | log_blksize = 512; | |
1819 | if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) { | |
1820 | if (HFS_MOUNT_DEBUG) { | |
1821 | printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n"); | |
1822 | } | |
1823 | retval = ENXIO; | |
1824 | goto error_exit; | |
1825 | } | |
1826 | if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) { | |
1827 | if (HFS_MOUNT_DEBUG) { | |
1828 | printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n"); | |
1829 | } | |
1830 | retval = ENXIO; | |
1831 | goto error_exit; | |
1832 | } | |
1833 | devvp->v_specsize = log_blksize; | |
1834 | /* Note: relative block count adjustment (in case this is an embedded volume). */ | |
1835 | hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize; | |
1836 | hfsmp->hfs_logical_block_size = log_blksize; | |
1837 | hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize; | |
1838 | ||
1839 | hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size; | |
1840 | ||
1841 | if (hfsmp->jnl && hfsmp->jvp == devvp) { | |
1842 | // close and re-open this with the new block size | |
1843 | journal_close(hfsmp->jnl); | |
1844 | hfsmp->jnl = NULL; | |
1845 | if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) { | |
1846 | vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED)); | |
1847 | } else { | |
1848 | // if the journal failed to open, then set the lastMountedVersion | |
1849 | // to be "FSK!" which fsck_hfs will see and force the fsck instead | |
1850 | // of just bailing out because the volume is journaled. | |
1851 | if (!ronly) { | |
1852 | if (HFS_MOUNT_DEBUG) { | |
1853 | printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n"); | |
1854 | } | |
1855 | HFSPlusVolumeHeader *jvhp; | |
1856 | ||
1857 | hfsmp->hfs_flags |= HFS_NEED_JNL_RESET; | |
1858 | ||
1859 | if (mdb_offset == 0) { | |
1860 | mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); | |
1861 | } | |
1862 | ||
1863 | bp = NULL; | |
1864 | retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), | |
1865 | phys_blksize, cred, &bp); | |
1866 | if (retval == 0) { | |
1867 | jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize)); | |
1868 | ||
1869 | if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) { | |
1870 | printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n"); | |
1871 | jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion); | |
1872 | buf_bwrite(bp); | |
1873 | } else { | |
1874 | buf_brelse(bp); | |
1875 | } | |
1876 | bp = NULL; | |
1877 | } else if (bp) { | |
1878 | buf_brelse(bp); | |
1879 | // clear this so the error exit path won't try to use it | |
1880 | bp = NULL; | |
1881 | } | |
1882 | } | |
1883 | ||
1884 | // if this isn't the root device just bail out. | |
1885 | // If it is the root device we just continue on | |
1886 | // in the hopes that fsck_hfs will be able to | |
1887 | // fix any damage that exists on the volume. | |
1888 | if ( !(vfs_flags(mp) & MNT_ROOTFS)) { | |
1889 | if (HFS_MOUNT_DEBUG) { | |
1890 | printf("hfs_mountfs: hfs_early_journal_init (2) failed \n"); | |
1891 | } | |
1892 | retval = EINVAL; | |
1893 | goto error_exit; | |
1894 | } | |
1895 | } | |
1896 | } | |
1897 | ||
1898 | /* Try again with a smaller block size... */ | |
1899 | retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred); | |
1900 | if (retval && HFS_MOUNT_DEBUG) { | |
1901 | printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval); | |
1902 | } | |
1903 | } | |
1904 | if (retval) | |
1905 | (void) hfs_relconverter(0); | |
1906 | } | |
1907 | ||
1908 | // save off a snapshot of the mtime from the previous mount | |
1909 | // (for matador). | |
1910 | hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime; | |
1911 | ||
1912 | if ( retval ) { | |
1913 | if (HFS_MOUNT_DEBUG) { | |
1914 | printf("hfs_mountfs: encountered failure %d \n", retval); | |
1915 | } | |
1916 | goto error_exit; | |
1917 | } | |
1918 | ||
1919 | mp->mnt_vfsstat.f_fsid.val[0] = dev; | |
1920 | mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp); | |
1921 | vfs_setmaxsymlen(mp, 0); | |
1922 | ||
1923 | mp->mnt_vtable->vfc_vfsflags |= VFC_VFSNATIVEXATTR; | |
1924 | #if NAMEDSTREAMS | |
1925 | mp->mnt_kern_flag |= MNTK_NAMED_STREAMS; | |
1926 | #endif | |
1927 | if ((hfsmp->hfs_flags & HFS_STANDARD) == 0 ) { | |
1928 | /* Tell VFS that we support directory hard links. */ | |
1929 | mp->mnt_vtable->vfc_vfsflags |= VFC_VFSDIRLINKS; | |
1930 | } | |
1931 | #if CONFIG_HFS_STD | |
1932 | else { | |
1933 | /* HFS standard doesn't support extended readdir! */ | |
1934 | mount_set_noreaddirext (mp); | |
1935 | } | |
1936 | #endif | |
1937 | ||
1938 | if (args) { | |
1939 | /* | |
1940 | * Set the free space warning levels for a non-root volume: | |
1941 | * | |
1942 | * Set the "danger" limit to 1% of the volume size or 100MB, whichever | |
1943 | * is less. Set the "warning" limit to 2% of the volume size or 150MB, | |
1944 | * whichever is less. And last, set the "desired" freespace level to | |
1945 | * 3% of the volume size or 200MB, whichever is less. | |
1946 | */ | |
1947 | hfsmp->hfs_freespace_notify_dangerlimit = | |
1948 | MIN(HFS_VERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, | |
1949 | (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_VERYLOWDISKTRIGGERFRACTION); | |
1950 | hfsmp->hfs_freespace_notify_warninglimit = | |
1951 | MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, | |
1952 | (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION); | |
1953 | hfsmp->hfs_freespace_notify_desiredlevel = | |
1954 | MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize, | |
1955 | (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION); | |
1956 | } else { | |
1957 | /* | |
1958 | * Set the free space warning levels for the root volume: | |
1959 | * | |
1960 | * Set the "danger" limit to 5% of the volume size or 512MB, whichever | |
1961 | * is less. Set the "warning" limit to 10% of the volume size or 1GB, | |
1962 | * whichever is less. And last, set the "desired" freespace level to | |
1963 | * 11% of the volume size or 1.25GB, whichever is less. | |
1964 | */ | |
1965 | hfsmp->hfs_freespace_notify_dangerlimit = | |
1966 | MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, | |
1967 | (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION); | |
1968 | hfsmp->hfs_freespace_notify_warninglimit = | |
1969 | MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, | |
1970 | (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION); | |
1971 | hfsmp->hfs_freespace_notify_desiredlevel = | |
1972 | MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize, | |
1973 | (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION); | |
1974 | } | |
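| /* | |
| * Worked example (illustrative, using the 100MB / 1% figures quoted in the | |
| * comment above): a 500 GB non-root volume with 4 KiB allocation blocks has | |
| * roughly 122,000,000 total blocks, so | |
| * | |
| *     1% of the volume  ~= 1,220,000 blocks  (about 5 GB) | |
| *     100 MB            ==    25,600 blocks | |
| * | |
| * and the "danger" limit is MIN(25,600, 1,220,000) = 25,600 blocks; the | |
| * fixed byte figure wins on large volumes, while on very small volumes the | |
| * percentage term is the smaller of the two. | |
| */ | |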
1975 | ||
1976 | /* Check if the file system exists on virtual device, like disk image */ | |
1977 | if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) { | |
1978 | if (isvirtual) { | |
1979 | hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE; | |
1980 | } | |
1981 | } | |
1982 | ||
1983 | /* do not allow ejectability checks on the root device */ | |
1984 | if (isroot == 0) { | |
1985 | if ((hfsmp->hfs_flags & HFS_VIRTUAL_DEVICE) == 0 && | |
1986 | IOBSDIsMediaEjectable(mp->mnt_vfsstat.f_mntfromname)) { | |
1987 | hfsmp->hfs_syncer = thread_call_allocate(hfs_syncer, hfsmp); | |
1988 | if (hfsmp->hfs_syncer == NULL) { | |
1989 | printf("hfs: failed to allocate syncer thread callback for %s (%s)\n", | |
1990 | mp->mnt_vfsstat.f_mntfromname, mp->mnt_vfsstat.f_mntonname); | |
1991 | } | |
1992 | } | |
1993 | } | |
1994 | ||
1995 | printf("hfs: mounted %s on device %s\n", (hfsmp->vcbVN ? (const char*) hfsmp->vcbVN : "unknown"), | |
1996 | (devvp->v_name ? devvp->v_name : (isroot ? "root_device": "unknown device"))); | |
1997 | ||
1998 | /* | |
1999 | * Start looking for free space to drop below this level and generate a | |
2000 | * warning immediately if needed: | |
2001 | */ | |
2002 | hfsmp->hfs_notification_conditions = 0; | |
2003 | hfs_generate_volume_notifications(hfsmp); | |
2004 | ||
2005 | if (ronly == 0) { | |
2006 | (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); | |
2007 | } | |
2008 | FREE(mdbp, M_TEMP); | |
2009 | return (0); | |
2010 | ||
2011 | error_exit: | |
2012 | if (bp) | |
2013 | buf_brelse(bp); | |
2014 | if (mdbp) | |
2015 | FREE(mdbp, M_TEMP); | |
2016 | ||
2017 | if (hfsmp && hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { | |
2018 | vnode_clearmountedon(hfsmp->jvp); | |
2019 | (void)VNOP_CLOSE(hfsmp->jvp, ronly ? FREAD : FREAD|FWRITE, vfs_context_kernel()); | |
2020 | hfsmp->jvp = NULL; | |
2021 | } | |
2022 | if (hfsmp) { | |
2023 | if (hfsmp->hfs_devvp) { | |
2024 | vnode_rele(hfsmp->hfs_devvp); | |
2025 | } | |
2026 | hfs_locks_destroy(hfsmp); | |
2027 | hfs_delete_chash(hfsmp); | |
2028 | hfs_idhash_destroy (hfsmp); | |
2029 | ||
2030 | FREE(hfsmp, M_HFSMNT); | |
2031 | vfs_setfsprivate(mp, NULL); | |
2032 | } | |
2033 | return (retval); | |
2034 | } | |
2035 | ||
2036 | ||
2037 | /* | |
2038 | * Make a filesystem operational. | |
2039 | * Nothing to do at the moment. | |
2040 | */ | |
2041 | /* ARGSUSED */ | |
2042 | static int | |
2043 | hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context) | |
2044 | { | |
2045 | return (0); | |
2046 | } | |
2047 | ||
2048 | ||
2049 | /* | |
2050 | * unmount system call | |
2051 | */ | |
2052 | int | |
2053 | hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) | |
2054 | { | |
2055 | struct proc *p = vfs_context_proc(context); | |
2056 | struct hfsmount *hfsmp = VFSTOHFS(mp); | |
2057 | int retval = E_NONE; | |
2058 | int flags; | |
2059 | int force; | |
2060 | int started_tr = 0; | |
2061 | ||
2062 | flags = 0; | |
2063 | force = 0; | |
2064 | if (mntflags & MNT_FORCE) { | |
2065 | flags |= FORCECLOSE; | |
2066 | force = 1; | |
2067 | } | |
2068 | ||
2069 | printf("hfs: unmount initiated on %s on device %s\n", | |
2070 | (hfsmp->vcbVN ? (const char*) hfsmp->vcbVN : "unknown"), | |
2071 | (hfsmp->hfs_devvp ? ((hfsmp->hfs_devvp->v_name ? hfsmp->hfs_devvp->v_name : "unknown device")) : "unknown device")); | |
2072 | ||
2073 | if ((retval = hfs_flushfiles(mp, flags, p)) && !force) | |
2074 | return (retval); | |
2075 | ||
2076 | if (hfsmp->hfs_flags & HFS_METADATA_ZONE) | |
2077 | (void) hfs_recording_suspend(hfsmp); | |
2078 | ||
2079 | hfs_syncer_free(hfsmp); | |
2080 | ||
2081 | if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { | |
2082 | if (hfsmp->hfs_summary_table) { | |
2083 | int err = 0; | |
2084 | /* | |
2085 | * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress | |
2086 | */ | |
2087 | if (hfsmp->hfs_allocation_vp) { | |
2088 | err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
2089 | } | |
2090 | FREE (hfsmp->hfs_summary_table, M_TEMP); | |
2091 | hfsmp->hfs_summary_table = NULL; | |
2092 | hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE; | |
2093 | ||
2094 | if (err == 0 && hfsmp->hfs_allocation_vp){ | |
2095 | hfs_unlock (VTOC(hfsmp->hfs_allocation_vp)); | |
2096 | } | |
2097 | ||
2098 | } | |
2099 | } | |
2100 | ||
2101 | /* | |
2102 | * Flush out the b-trees, volume bitmap and Volume Header | |
2103 | */ | |
2104 | if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) { | |
2105 | retval = hfs_start_transaction(hfsmp); | |
2106 | if (retval == 0) { | |
2107 | started_tr = 1; | |
2108 | } else if (!force) { | |
2109 | goto err_exit; | |
2110 | } | |
2111 | ||
2112 | if (hfsmp->hfs_startup_vp) { | |
2113 | (void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
2114 | retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p); | |
2115 | hfs_unlock(VTOC(hfsmp->hfs_startup_vp)); | |
2116 | if (retval && !force) | |
2117 | goto err_exit; | |
2118 | } | |
2119 | ||
2120 | if (hfsmp->hfs_attribute_vp) { | |
2121 | (void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
2122 | retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p); | |
2123 | hfs_unlock(VTOC(hfsmp->hfs_attribute_vp)); | |
2124 | if (retval && !force) | |
2125 | goto err_exit; | |
2126 | } | |
2127 | ||
2128 | (void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
2129 | retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p); | |
2130 | hfs_unlock(VTOC(hfsmp->hfs_catalog_vp)); | |
2131 | if (retval && !force) | |
2132 | goto err_exit; | |
2133 | ||
2134 | (void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
2135 | retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p); | |
2136 | hfs_unlock(VTOC(hfsmp->hfs_extents_vp)); | |
2137 | if (retval && !force) | |
2138 | goto err_exit; | |
2139 | ||
2140 | if (hfsmp->hfs_allocation_vp) { | |
2141 | (void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
2142 | retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p); | |
2143 | hfs_unlock(VTOC(hfsmp->hfs_allocation_vp)); | |
2144 | if (retval && !force) | |
2145 | goto err_exit; | |
2146 | } | |
2147 | ||
2148 | if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) { | |
2149 | retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p); | |
2150 | if (retval && !force) | |
2151 | goto err_exit; | |
2152 | } | |
2153 | ||
2154 | /* If runtime corruption was detected, indicate that the volume | |
2155 | * was not unmounted cleanly. | |
2156 | */ | |
2157 | if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) { | |
2158 | HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; | |
2159 | } else { | |
2160 | HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask; | |
2161 | } | |
2162 | ||
2163 | if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) { | |
2164 | int i; | |
2165 | u_int32_t min_start = hfsmp->totalBlocks; | |
2166 | ||
2167 | // set the nextAllocation pointer to the smallest free block number | |
2168 | // we've seen so on the next mount we won't rescan unnecessarily | |
2169 | lck_spin_lock(&hfsmp->vcbFreeExtLock); | |
2170 | for(i=0; i < (int)hfsmp->vcbFreeExtCnt; i++) { | |
2171 | if (hfsmp->vcbFreeExt[i].startBlock < min_start) { | |
2172 | min_start = hfsmp->vcbFreeExt[i].startBlock; | |
2173 | } | |
2174 | } | |
2175 | lck_spin_unlock(&hfsmp->vcbFreeExtLock); | |
2176 | if (min_start < hfsmp->nextAllocation) { | |
2177 | hfsmp->nextAllocation = min_start; | |
2178 | } | |
2179 | } | |
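| // Illustrative example (not part of the original source): if the recorded | |
| // free extents start at blocks 5000, 12000 and 800, the scan above yields | |
| // min_start = 800; with nextAllocation currently at, say, 20000 it is | |
| // rewound to 800 so the next mount begins its free-space scan there | |
| // instead of rescanning the earlier region. | |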
2180 | ||
2181 | retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); | |
2182 | if (retval) { | |
2183 | HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; | |
2184 | if (!force) | |
2185 | goto err_exit; /* could not flush everything */ | |
2186 | } | |
2187 | ||
2188 | if (started_tr) { | |
2189 | hfs_end_transaction(hfsmp); | |
2190 | started_tr = 0; | |
2191 | } | |
2192 | } | |
2193 | ||
2194 | if (hfsmp->jnl) { | |
2195 | hfs_journal_flush(hfsmp, FALSE); | |
2196 | } | |
2197 | ||
2198 | /* | |
2199 | * Invalidate our caches and release metadata vnodes | |
2200 | */ | |
2201 | (void) hfsUnmount(hfsmp, p); | |
2202 | ||
2203 | #if CONFIG_HFS_STD | |
2204 | if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) { | |
2205 | (void) hfs_relconverter(hfsmp->hfs_encoding); | |
2206 | } | |
2207 | #endif | |
2208 | ||
2209 | // XXXdbg | |
2210 | if (hfsmp->jnl) { | |
2211 | journal_close(hfsmp->jnl); | |
2212 | hfsmp->jnl = NULL; | |
2213 | } | |
2214 | ||
2215 | VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context); | |
2216 | ||
2217 | if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { | |
2218 | vnode_clearmountedon(hfsmp->jvp); | |
2219 | retval = VNOP_CLOSE(hfsmp->jvp, | |
2220 | hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, | |
2221 | vfs_context_kernel()); | |
2222 | vnode_put(hfsmp->jvp); | |
2223 | hfsmp->jvp = NULL; | |
2224 | } | |
2225 | // XXXdbg | |
2226 | ||
2227 | /* | |
2228 | * Last chance to dump unreferenced system files. | |
2229 | */ | |
2230 | (void) vflush(mp, NULLVP, FORCECLOSE); | |
2231 | ||
2232 | #if HFS_SPARSE_DEV | |
2233 | /* Drop our reference on the backing fs (if any). */ | |
2234 | if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) { | |
2235 | struct vnode * tmpvp; | |
2236 | ||
2237 | hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE; | |
2238 | tmpvp = hfsmp->hfs_backingfs_rootvp; | |
2239 | hfsmp->hfs_backingfs_rootvp = NULLVP; | |
2240 | vnode_rele(tmpvp); | |
2241 | } | |
2242 | #endif /* HFS_SPARSE_DEV */ | |
2243 | ||
2244 | vnode_rele(hfsmp->hfs_devvp); | |
2245 | ||
2246 | hfs_locks_destroy(hfsmp); | |
2247 | hfs_delete_chash(hfsmp); | |
2248 | hfs_idhash_destroy(hfsmp); | |
2249 | FREE(hfsmp, M_HFSMNT); | |
2250 | ||
2251 | return (0); | |
2252 | ||
2253 | err_exit: | |
2254 | if (started_tr) { | |
2255 | hfs_end_transaction(hfsmp); | |
2256 | } | |
2257 | return retval; | |
2258 | } | |
2259 | ||
2260 | ||
2261 | /* | |
2262 | * Return the root of a filesystem. | |
2263 | */ | |
2264 | static int | |
2265 | hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context) | |
2266 | { | |
2267 | return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0); | |
2268 | } | |
2269 | ||
2270 | ||
2271 | /* | |
2272 | * Do operations associated with quotas | |
2273 | */ | |
2274 | #if !QUOTA | |
2275 | static int | |
2276 | hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context) | |
2277 | { | |
2278 | return (ENOTSUP); | |
2279 | } | |
2280 | #else | |
2281 | static int | |
2282 | hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context) | |
2283 | { | |
2284 | struct proc *p = vfs_context_proc(context); | |
2285 | int cmd, type, error; | |
2286 | ||
2287 | if (uid == ~0U) | |
2288 | uid = kauth_cred_getuid(vfs_context_ucred(context)); | |
2289 | cmd = cmds >> SUBCMDSHIFT; | |
2290 | ||
2291 | switch (cmd) { | |
2292 | case Q_SYNC: | |
2293 | case Q_QUOTASTAT: | |
2294 | break; | |
2295 | case Q_GETQUOTA: | |
2296 | if (uid == kauth_cred_getuid(vfs_context_ucred(context))) | |
2297 | break; | |
2298 | /* fall through */ | |
2299 | default: | |
2300 | if ( (error = vfs_context_suser(context)) ) | |
2301 | return (error); | |
2302 | } | |
2303 | ||
2304 | type = cmds & SUBCMDMASK; | |
2305 | if ((u_int)type >= MAXQUOTAS) | |
2306 | return (EINVAL); | |
2307 | if (vfs_busy(mp, LK_NOWAIT)) | |
2308 | return (0); | |
2309 | ||
2310 | switch (cmd) { | |
2311 | ||
2312 | case Q_QUOTAON: | |
2313 | error = hfs_quotaon(p, mp, type, datap); | |
2314 | break; | |
2315 | ||
2316 | case Q_QUOTAOFF: | |
2317 | error = hfs_quotaoff(p, mp, type); | |
2318 | break; | |
2319 | ||
2320 | case Q_SETQUOTA: | |
2321 | error = hfs_setquota(mp, uid, type, datap); | |
2322 | break; | |
2323 | ||
2324 | case Q_SETUSE: | |
2325 | error = hfs_setuse(mp, uid, type, datap); | |
2326 | break; | |
2327 | ||
2328 | case Q_GETQUOTA: | |
2329 | error = hfs_getquota(mp, uid, type, datap); | |
2330 | break; | |
2331 | ||
2332 | case Q_SYNC: | |
2333 | error = hfs_qsync(mp); | |
2334 | break; | |
2335 | ||
2336 | case Q_QUOTASTAT: | |
2337 | error = hfs_quotastat(mp, type, datap); | |
2338 | break; | |
2339 | ||
2340 | default: | |
2341 | error = EINVAL; | |
2342 | break; | |
2343 | } | |
2344 | vfs_unbusy(mp); | |
2345 | ||
2346 | return (error); | |
2347 | } | |
2348 | #endif /* QUOTA */ | |
2349 | ||
2350 | /* Subtype is composite of bits */ | |
2351 | #define HFS_SUBTYPE_JOURNALED 0x01 | |
2352 | #define HFS_SUBTYPE_CASESENSITIVE 0x02 | |
2353 | /* bits 2 - 6 reserved */ | |
2354 | #define HFS_SUBTYPE_STANDARDHFS 0x80 | |
2355 | ||
2356 | /* | |
2357 | * Get file system statistics. | |
2358 | */ | |
2359 | int | |
2360 | hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context) | |
2361 | { | |
2362 | ExtendedVCB *vcb = VFSTOVCB(mp); | |
2363 | struct hfsmount *hfsmp = VFSTOHFS(mp); | |
2364 | u_int32_t freeCNIDs; | |
2365 | u_int16_t subtype = 0; | |
2366 | ||
2367 | freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)vcb->vcbNxtCNID; | |
2368 | ||
2369 | sbp->f_bsize = (u_int32_t)vcb->blockSize; | |
2370 | sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0); | |
2371 | sbp->f_blocks = (u_int64_t)((u_int32_t)vcb->totalBlocks); | |
2372 | sbp->f_bfree = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 0)); | |
2373 | sbp->f_bavail = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 1)); | |
2374 | sbp->f_files = (u_int64_t)((u_int32_t )(vcb->totalBlocks - 2)); /* max files is constrained by total blocks */ | |
2375 | sbp->f_ffree = (u_int64_t)((u_int32_t )(MIN(freeCNIDs, sbp->f_bavail))); | |
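| /* | |
| * Worked example (illustrative, with assumed numbers): for a volume with | |
| * blockSize = 4096, totalBlocks = 1,000,000, about 250,000 free blocks and | |
| * vcbNxtCNID = 50,000, the fields above come out as | |
| * | |
| *     f_bsize  = 4096 | |
| *     f_blocks = 1,000,000 | |
| *     f_files  = 999,998                      (totalBlocks - 2) | |
| *     f_ffree  = MIN(0xFFFFFFFF - 50,000, f_bavail) | |
| * | |
| * so on any realistically sized volume f_ffree is bounded by the free | |
| * block count rather than by the remaining CNID space. | |
| */ | |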
2376 | ||
2377 | /* | |
2378 | * Subtypes (flavors) for HFS | |
2379 | * 0: Mac OS Extended | |
2380 | * 1: Mac OS Extended (Journaled) | |
2381 | * 2: Mac OS Extended (Case Sensitive) | |
2382 | * 3: Mac OS Extended (Case Sensitive, Journaled) | |
2383 | * 4 - 127: Reserved | |
2384 | * 128: Mac OS Standard | |
2385 | * | |
2386 | */ | |
2387 | if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) { | |
2388 | /* HFS+ & variants */ | |
2389 | if (hfsmp->jnl) { | |
2390 | subtype |= HFS_SUBTYPE_JOURNALED; | |
2391 | } | |
2392 | if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) { | |
2393 | subtype |= HFS_SUBTYPE_CASESENSITIVE; | |
2394 | } | |
2395 | } | |
2396 | #if CONFIG_HFS_STD | |
2397 | else { | |
2398 | /* HFS standard */ | |
2399 | subtype = HFS_SUBTYPE_STANDARDHFS; | |
2400 | } | |
2401 | #endif | |
2402 | sbp->f_fssubtype = subtype; | |
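| /* | |
| * Illustrative mapping (derived from the table above, not new behavior): a | |
| * journaled, case-sensitive HFS Plus volume reports | |
| * | |
| *     f_fssubtype = HFS_SUBTYPE_JOURNALED | HFS_SUBTYPE_CASESENSITIVE;   i.e. 0x03 | |
| * | |
| * which statfs(2) consumers present as "Mac OS Extended (Case Sensitive, | |
| * Journaled)", while an HFS standard volume reports 0x80. | |
| */ | |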
2403 | ||
2404 | return (0); | |
2405 | } | |
2406 | ||
2407 | ||
2408 | // | |
2409 | // XXXdbg -- this is a callback to be used by the journal to | |
2410 | // get meta data blocks flushed out to disk. | |
2411 | // | |
2412 | // XXXdbg -- be smarter and don't flush *every* block on each | |
2413 | // call. try to only flush some so we don't wind up | |
2414 | // being too synchronous. | |
2415 | // | |
2416 | __private_extern__ | |
2417 | void | |
2418 | hfs_sync_metadata(void *arg) | |
2419 | { | |
2420 | struct mount *mp = (struct mount *)arg; | |
2421 | struct hfsmount *hfsmp; | |
2422 | ExtendedVCB *vcb; | |
2423 | buf_t bp; | |
2424 | int retval; | |
2425 | daddr64_t priIDSector; | |
2426 | hfsmp = VFSTOHFS(mp); | |
2427 | vcb = HFSTOVCB(hfsmp); | |
2428 | ||
2429 | // now make sure the super block is flushed | |
2430 | priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + | |
2431 | HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size)); | |
2432 | ||
2433 | retval = (int)buf_meta_bread(hfsmp->hfs_devvp, | |
2434 | HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys), | |
2435 | hfsmp->hfs_physical_block_size, NOCRED, &bp); | |
2436 | if ((retval != 0 ) && (retval != ENXIO)) { | |
2437 | printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n", | |
2438 | (int)priIDSector, retval); | |
2439 | } | |
2440 | ||
2441 | if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) { | |
2442 | buf_bwrite(bp); | |
2443 | } else if (bp) { | |
2444 | buf_brelse(bp); | |
2445 | } | |
2446 | ||
2447 | /* Note that these I/Os bypass the journal (no calls to journal_start_modify_block) */ | |
2448 | ||
2449 | // the alternate super block... | |
2450 | // XXXdbg - we probably don't need to do this each and every time. | |
2451 | // hfs_btreeio.c:FlushAlternate() should flag when it was | |
2452 | // written... | |
2453 | if (hfsmp->hfs_partition_avh_sector) { | |
2454 | retval = (int)buf_meta_bread(hfsmp->hfs_devvp, | |
2455 | HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys), | |
2456 | hfsmp->hfs_physical_block_size, NOCRED, &bp); | |
2457 | if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) { | |
2458 | /* | |
2459 | * note this I/O can fail if the partition shrank behind our backs! | |
2460 | * So failure should be OK here. | |
2461 | */ | |
2462 | buf_bwrite(bp); | |
2463 | } else if (bp) { | |
2464 | buf_brelse(bp); | |
2465 | } | |
2466 | } | |
2467 | ||
2468 | /* Is the FS's idea of the AVH different than the partition ? */ | |
2469 | if ((hfsmp->hfs_fs_avh_sector) && (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) { | |
2470 | retval = (int)buf_meta_bread(hfsmp->hfs_devvp, | |
2471 | HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys), | |
2472 | hfsmp->hfs_physical_block_size, NOCRED, &bp); | |
2473 | if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) { | |
2474 | buf_bwrite(bp); | |
2475 | } else if (bp) { | |
2476 | buf_brelse(bp); | |
2477 | } | |
2478 | } | |
2479 | ||
2480 | } | |
2481 | ||
2482 | ||
2483 | struct hfs_sync_cargs { | |
2484 | kauth_cred_t cred; | |
2485 | struct proc *p; | |
2486 | int waitfor; | |
2487 | int error; | |
2488 | }; | |
2489 | ||
2490 | ||
2491 | static int | |
2492 | hfs_sync_callback(struct vnode *vp, void *cargs) | |
2493 | { | |
2494 | struct cnode *cp; | |
2495 | struct hfs_sync_cargs *args; | |
2496 | int error; | |
2497 | ||
2498 | args = (struct hfs_sync_cargs *)cargs; | |
2499 | ||
2500 | if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) { | |
2501 | return (VNODE_RETURNED); | |
2502 | } | |
2503 | cp = VTOC(vp); | |
2504 | ||
2505 | if ((cp->c_flag & C_MODIFIED) || | |
2506 | (cp->c_touch_acctime | cp->c_touch_chgtime | cp->c_touch_modtime) || | |
2507 | vnode_hasdirtyblks(vp)) { | |
2508 | error = hfs_fsync(vp, args->waitfor, 0, args->p); | |
2509 | ||
2510 | if (error) | |
2511 | args->error = error; | |
2512 | } | |
2513 | hfs_unlock(cp); | |
2514 | return (VNODE_RETURNED); | |
2515 | } | |
2516 | ||
2517 | ||
2518 | ||
2519 | /* | |
2520 | * Go through the disk queues to initiate sandbagged IO; | |
2521 | * go through the inodes to write those that have been modified; | |
2522 | * initiate the writing of the super block if it has been modified. | |
2523 | * | |
2524 | * Note: we are always called with the filesystem marked `MPBUSY'. | |
2525 | */ | |
2526 | int | |
2527 | hfs_sync(struct mount *mp, int waitfor, vfs_context_t context) | |
2528 | { | |
2529 | struct proc *p = vfs_context_proc(context); | |
2530 | struct cnode *cp; | |
2531 | struct hfsmount *hfsmp; | |
2532 | ExtendedVCB *vcb; | |
2533 | struct vnode *meta_vp[4]; | |
2534 | int i; | |
2535 | int error, allerror = 0; | |
2536 | struct hfs_sync_cargs args; | |
2537 | ||
2538 | hfsmp = VFSTOHFS(mp); | |
2539 | ||
2540 | // Back off if hfs_changefs or a freeze is underway | |
2541 | hfs_lock_mount(hfsmp); | |
2542 | if ((hfsmp->hfs_flags & HFS_IN_CHANGEFS) | |
2543 | || hfsmp->hfs_freeze_state != HFS_THAWED) { | |
2544 | hfs_unlock_mount(hfsmp); | |
2545 | return 0; | |
2546 | } | |
2547 | ||
2548 | if (hfsmp->hfs_flags & HFS_READ_ONLY) { | |
2549 | hfs_unlock_mount(hfsmp); | |
2550 | return (EROFS); | |
2551 | } | |
2552 | ||
2553 | ++hfsmp->hfs_syncers; | |
2554 | hfs_unlock_mount(hfsmp); | |
2555 | ||
2556 | args.cred = kauth_cred_get(); | |
2557 | args.waitfor = waitfor; | |
2558 | args.p = p; | |
2559 | args.error = 0; | |
2560 | /* | |
2561 | * hfs_sync_callback will be called for each vnode | |
2562 | * hung off of this mount point... the vnode will be | |
2563 | * properly referenced and unreferenced around the callback | |
2564 | */ | |
2565 | vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args); | |
2566 | ||
2567 | if (args.error) | |
2568 | allerror = args.error; | |
2569 | ||
2570 | vcb = HFSTOVCB(hfsmp); | |
2571 | ||
2572 | meta_vp[0] = vcb->extentsRefNum; | |
2573 | meta_vp[1] = vcb->catalogRefNum; | |
2574 | meta_vp[2] = vcb->allocationsRefNum; /* This is NULL for standard HFS */ | |
2575 | meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */ | |
2576 | ||
2577 | /* Now sync our four metadata files (extents, catalog, allocation bitmap, attributes) */ | |
2578 | for (i = 0; i < 4; ++i) { | |
2579 | struct vnode *btvp; | |
2580 | ||
2581 | btvp = meta_vp[i]; | |
2582 | if ((btvp==0) || (vnode_mount(btvp) != mp)) | |
2583 | continue; | |
2584 | ||
2585 | /* XXX use hfs_systemfile_lock instead ? */ | |
2586 | (void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
2587 | cp = VTOC(btvp); | |
2588 | ||
2589 | if (((cp->c_flag & C_MODIFIED) == 0) && | |
2590 | (cp->c_touch_acctime == 0) && | |
2591 | (cp->c_touch_chgtime == 0) && | |
2592 | (cp->c_touch_modtime == 0) && | |
2593 | vnode_hasdirtyblks(btvp) == 0) { | |
2594 | hfs_unlock(VTOC(btvp)); | |
2595 | continue; | |
2596 | } | |
2597 | error = vnode_get(btvp); | |
2598 | if (error) { | |
2599 | hfs_unlock(VTOC(btvp)); | |
2600 | continue; | |
2601 | } | |
2602 | if ((error = hfs_fsync(btvp, waitfor, 0, p))) | |
2603 | allerror = error; | |
2604 | ||
2605 | hfs_unlock(cp); | |
2606 | vnode_put(btvp); | |
2607 | } | |
2608 | ||
2609 | ||
2610 | #if CONFIG_HFS_STD | |
2611 | /* | |
2612 | * Force stale file system control information to be flushed. | |
2613 | */ | |
2614 | if (vcb->vcbSigWord == kHFSSigWord) { | |
2615 | if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) { | |
2616 | allerror = error; | |
2617 | } | |
2618 | } | |
2619 | #endif | |
2620 | ||
2621 | #if QUOTA | |
2622 | hfs_qsync(mp); | |
2623 | #endif /* QUOTA */ | |
2624 | ||
2625 | hfs_hotfilesync(hfsmp, vfs_context_kernel()); | |
2626 | ||
2627 | /* | |
2628 | * Write back modified superblock. | |
2629 | */ | |
2630 | if (IsVCBDirty(vcb)) { | |
2631 | error = hfs_flushvolumeheader(hfsmp, waitfor, 0); | |
2632 | if (error) | |
2633 | allerror = error; | |
2634 | } | |
2635 | ||
2636 | if (hfsmp->jnl) { | |
2637 | hfs_journal_flush(hfsmp, FALSE); | |
2638 | } | |
2639 | ||
2640 | hfs_lock_mount(hfsmp); | |
2641 | boolean_t wake = (!--hfsmp->hfs_syncers | |
2642 | && hfsmp->hfs_freeze_state == HFS_WANT_TO_FREEZE); | |
2643 | hfs_unlock_mount(hfsmp); | |
2644 | if (wake) | |
2645 | wakeup(&hfsmp->hfs_freeze_state); | |
2646 | ||
2647 | return (allerror); | |
2648 | } | |
2649 | ||
2650 | ||
2651 | /* | |
2652 | * File handle to vnode | |
2653 | * | |
2654 | * Have to be really careful about stale file handles: | |
2655 | * - check that the cnode id is valid | |
2656 | * - call hfs_vget() to get the locked cnode | |
2657 | * - check for an unallocated cnode (i_mode == 0) | |
2658 | * - check that the given client host has export rights and return | |
2659 | * those rights via exflagsp and credanonp | |
2660 | */ | |
2661 | static int | |
2662 | hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context) | |
2663 | { | |
2664 | struct hfsfid *hfsfhp; | |
2665 | struct vnode *nvp; | |
2666 | int result; | |
2667 | ||
2668 | *vpp = NULL; | |
2669 | hfsfhp = (struct hfsfid *)fhp; | |
2670 | ||
2671 | if (fhlen < (int)sizeof(struct hfsfid)) | |
2672 | return (EINVAL); | |
2673 | ||
2674 | result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0, 0); | |
2675 | if (result) { | |
2676 | if (result == ENOENT) | |
2677 | result = ESTALE; | |
2678 | return result; | |
2679 | } | |
2680 | ||
2681 | /* | |
2682 | * We used to use the create time as the gen id of the file handle, | |
2683 | * but it is not static enough because it can change at any point | |
2684 | * via system calls. We still don't have another volume ID or other | |
2685 | * unique identifier to use for a generation ID across reboots that | |
2686 | * persists until the file is removed. Using only the CNID exposes | |
2687 | * us to the potential wrap-around case, but as of 2/2008, it would take | |
2688 | * over 2 months to wrap around if the machine did nothing but allocate | |
2689 | * CNIDs. Using some kind of wrap counter would only be effective if | |
2690 | * each file had the wrap counter associated with it. For now, | |
2691 | * we use only the CNID to identify the file as it's good enough. | |
2692 | */ | |
2693 | ||
2694 | *vpp = nvp; | |
2695 | ||
2696 | hfs_unlock(VTOC(nvp)); | |
2697 | return (0); | |
2698 | } | |
2699 | ||
2700 | ||
2701 | /* | |
2702 | * Vnode pointer to File handle | |
2703 | */ | |
2704 | /* ARGSUSED */ | |
2705 | static int | |
2706 | hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context) | |
2707 | { | |
2708 | struct cnode *cp; | |
2709 | struct hfsfid *hfsfhp; | |
2710 | ||
2711 | if (ISHFS(VTOVCB(vp))) | |
2712 | return (ENOTSUP); /* hfs standard is not exportable */ | |
2713 | ||
2714 | if (*fhlenp < (int)sizeof(struct hfsfid)) | |
2715 | return (EOVERFLOW); | |
2716 | ||
2717 | cp = VTOC(vp); | |
2718 | hfsfhp = (struct hfsfid *)fhp; | |
2719 | /* only the CNID is used to identify the file now */ | |
2720 | hfsfhp->hfsfid_cnid = htonl(cp->c_fileid); | |
2721 | hfsfhp->hfsfid_gen = htonl(cp->c_fileid); | |
2722 | *fhlenp = sizeof(struct hfsfid); | |
2723 | ||
2724 | return (0); | |
2725 | } | |
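| /* | |
| * Illustrative round trip (not part of the original source): an exported | |
| * HFS Plus volume packs only the 32-bit CNID into the file handle, so | |
| * | |
| *     hfs_vptofh()  stores htonl(cp->c_fileid) in both handle fields, and | |
| *     hfs_fhtovp()  recovers it with ntohl(hfsfid_cnid) and calls hfs_vget() | |
| *                   to look the cnode up again. | |
| * | |
| * A handle therefore goes stale only when that CNID is deleted (ESTALE) or, | |
| * in the wrap-around case described above, eventually reused. | |
| */ | |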
2726 | ||
2727 | ||
2728 | /* | |
2729 | * Initialize HFS filesystems, done only once per boot. | |
2730 | * | |
2731 | * HFS is not a kext-based file system. This makes it difficult to find | |
2732 | * out when the last HFS file system was unmounted and call hfs_uninit() | |
2733 | * to deallocate data structures allocated in hfs_init(). Therefore we | |
2734 | * never deallocate the memory allocated by the lock attribute and group | |
2735 | * initializations in this function. | |
2736 | */ | |
2737 | static int | |
2738 | hfs_init(__unused struct vfsconf *vfsp) | |
2739 | { | |
2740 | static int done = 0; | |
2741 | ||
2742 | if (done) | |
2743 | return (0); | |
2744 | done = 1; | |
2745 | hfs_chashinit(); | |
2746 | hfs_converterinit(); | |
2747 | ||
2748 | BTReserveSetup(); | |
2749 | ||
2750 | hfs_lock_attr = lck_attr_alloc_init(); | |
2751 | hfs_group_attr = lck_grp_attr_alloc_init(); | |
2752 | hfs_mutex_group = lck_grp_alloc_init("hfs-mutex", hfs_group_attr); | |
2753 | hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr); | |
2754 | hfs_spinlock_group = lck_grp_alloc_init("hfs-spinlock", hfs_group_attr); | |
2755 | ||
2756 | #if HFS_COMPRESSION | |
2757 | decmpfs_init(); | |
2758 | #endif | |
2759 | ||
2760 | return (0); | |
2761 | } | |
2762 | ||
2763 | ||
2764 | /* | |
2765 | * Destroy all locks, mutexes and spinlocks in hfsmp on unmount or failed mount | |
2766 | */ | |
2767 | static void | |
2768 | hfs_locks_destroy(struct hfsmount *hfsmp) | |
2769 | { | |
2770 | ||
2771 | lck_mtx_destroy(&hfsmp->hfs_mutex, hfs_mutex_group); | |
2772 | lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group); | |
2773 | lck_rw_destroy(&hfsmp->hfs_global_lock, hfs_rwlock_group); | |
2774 | lck_spin_destroy(&hfsmp->vcbFreeExtLock, hfs_spinlock_group); | |
2775 | ||
2776 | return; | |
2777 | } | |
2778 | ||
2779 | ||
2780 | static int | |
2781 | hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp) | |
2782 | { | |
2783 | struct hfsmount * hfsmp; | |
2784 | char fstypename[MFSNAMELEN]; | |
2785 | ||
2786 | if (vp == NULL) | |
2787 | return (EINVAL); | |
2788 | ||
2789 | if (!vnode_isvroot(vp)) | |
2790 | return (EINVAL); | |
2791 | ||
2792 | vnode_vfsname(vp, fstypename); | |
2793 | if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0) | |
2794 | return (EINVAL); | |
2795 | ||
2796 | hfsmp = VTOHFS(vp); | |
2797 | ||
2798 | if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) | |
2799 | return (EINVAL); | |
2800 | ||
2801 | *hfsmpp = hfsmp; | |
2802 | ||
2803 | return (0); | |
2804 | } | |
2805 | ||
2806 | // XXXdbg | |
2807 | #include <sys/filedesc.h> | |
2808 | ||
2809 | /* | |
2810 | * HFS filesystem related variables. | |
2811 | */ | |
2812 | int | |
2813 | hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp, | |
2814 | user_addr_t newp, size_t newlen, vfs_context_t context) | |
2815 | { | |
2816 | struct proc *p = vfs_context_proc(context); | |
2817 | int error; | |
2818 | struct hfsmount *hfsmp; | |
2819 | ||
2820 | /* all sysctl names at this level are terminal */ | |
2821 | ||
2822 | if (name[0] == HFS_ENCODINGBIAS) { | |
2823 | int bias; | |
2824 | ||
2825 | bias = hfs_getencodingbias(); | |
2826 | error = sysctl_int(oldp, oldlenp, newp, newlen, &bias); | |
2827 | if (error == 0 && newp) | |
2828 | hfs_setencodingbias(bias); | |
2829 | return (error); | |
2830 | ||
2831 | } else if (name[0] == HFS_EXTEND_FS) { | |
2832 | u_int64_t newsize = 0; | |
2833 | vnode_t vp = vfs_context_cwd(context); | |
2834 | ||
2835 | if (newp == USER_ADDR_NULL || vp == NULLVP) | |
2836 | return (EINVAL); | |
2837 | if ((error = hfs_getmountpoint(vp, &hfsmp))) | |
2838 | return (error); | |
2839 | ||
2840 | /* Start with the 'size' set to the current number of bytes in the filesystem */ | |
2841 | newsize = ((uint64_t)hfsmp->totalBlocks) * ((uint64_t)hfsmp->blockSize); | |
2842 | ||
2843 | /* now get the new size from userland and over-write our stored value */ | |
2844 | error = sysctl_quad(oldp, oldlenp, newp, newlen, (quad_t *)&newsize); | |
2845 | if (error) | |
2846 | return (error); | |
2847 | ||
2848 | error = hfs_extendfs(hfsmp, newsize, context); | |
2849 | return (error); | |
2850 | ||
2851 | } else if (name[0] == HFS_ENCODINGHINT) { | |
2852 | size_t bufsize; | |
2853 | size_t bytes; | |
2854 | u_int32_t hint; | |
2855 | u_int16_t *unicode_name = NULL; | |
2856 | char *filename = NULL; | |
2857 | ||
2858 | if ((newlen <= 0) || (newlen > MAXPATHLEN)) | |
2859 | return (EINVAL); | |
2860 | ||
2861 | bufsize = MAX(newlen * 3, MAXPATHLEN); | |
2862 | MALLOC(filename, char *, newlen, M_TEMP, M_WAITOK); | |
2863 | if (filename == NULL) { | |
2864 | error = ENOMEM; | |
2865 | goto encodinghint_exit; | |
2866 | } | |
2867 | MALLOC(unicode_name, u_int16_t *, bufsize, M_TEMP, M_WAITOK); | |
2868 | if (unicode_name == NULL) { | |
2869 | error = ENOMEM; | |
2870 | goto encodinghint_exit; | |
2871 | } | |
2872 | ||
2873 | error = copyin(newp, (caddr_t)filename, newlen); | |
2874 | if (error == 0) { | |
2875 | error = utf8_decodestr((u_int8_t *)filename, newlen - 1, unicode_name, | |
2876 | &bytes, bufsize, 0, UTF_DECOMPOSED); | |
2877 | if (error == 0) { | |
2878 | hint = hfs_pickencoding(unicode_name, bytes / 2); | |
2879 | error = sysctl_int(oldp, oldlenp, USER_ADDR_NULL, 0, (int32_t *)&hint); | |
2880 | } | |
2881 | } | |
2882 | ||
2883 | encodinghint_exit: | |
2884 | if (unicode_name) | |
2885 | FREE(unicode_name, M_TEMP); | |
2886 | if (filename) | |
2887 | FREE(filename, M_TEMP); | |
2888 | return (error); | |
2889 | ||
2890 | } else if (name[0] == HFS_ENABLE_JOURNALING) { | |
2891 | // make the file system journaled... | |
2892 | vnode_t vp = vfs_context_cwd(context); | |
2893 | vnode_t jvp; | |
2894 | ExtendedVCB *vcb; | |
2895 | struct cat_attr jnl_attr; | |
2896 | struct cat_attr jinfo_attr; | |
2897 | struct cat_fork jnl_fork; | |
2898 | struct cat_fork jinfo_fork; | |
2899 | buf_t jib_buf; | |
2900 | uint64_t jib_blkno; | |
2901 | uint32_t tmpblkno; | |
2902 | uint64_t journal_byte_offset; | |
2903 | uint64_t journal_size; | |
2904 | vnode_t jib_vp = NULLVP; | |
2905 | struct JournalInfoBlock local_jib; | |
2906 | int err = 0; | |
2907 | void *jnl = NULL; | |
2908 | int lockflags; | |
2909 | ||
2910 | /* Only root can enable journaling */ | |
2911 | if (!kauth_cred_issuser(kauth_cred_get())) { | |
2912 | return (EPERM); | |
2913 | } | |
2914 | if (vp == NULLVP) | |
2915 | return EINVAL; | |
2916 | ||
2917 | hfsmp = VTOHFS(vp); | |
2918 | if (hfsmp->hfs_flags & HFS_READ_ONLY) { | |
2919 | return EROFS; | |
2920 | } | |
2921 | if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) { | |
2922 | printf("hfs: can't make a plain hfs volume journaled.\n"); | |
2923 | return EINVAL; | |
2924 | } | |
2925 | ||
2926 | if (hfsmp->jnl) { | |
2927 | printf("hfs: volume @ mp %p is already journaled!\n", vnode_mount(vp)); | |
2928 | return EAGAIN; | |
2929 | } | |
2930 | vcb = HFSTOVCB(hfsmp); | |
2931 | ||
2932 | /* Set up local copies of the initialization info */ | |
2933 | tmpblkno = (uint32_t) name[1]; | |
2934 | jib_blkno = (uint64_t) tmpblkno; | |
2935 | journal_byte_offset = (uint64_t) name[2]; | |
2936 | journal_byte_offset *= hfsmp->blockSize; | |
2937 | journal_byte_offset += hfsmp->hfsPlusIOPosOffset; | |
2938 | journal_size = (uint64_t)((unsigned)name[3]); | |
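| /* | |
| * Worked example (illustrative, with assumed numbers): on a volume with a | |
| * 4096-byte allocation block size and hfsPlusIOPosOffset = 0, sysctl args of | |
| * name[2] = 1024 and name[3] = 8388608 place the journal at byte offset | |
| * 1024 * 4096 = 4,194,304 and make it 8 MiB long; name[1] is the allocation | |
| * block holding the .journal_info_block file itself. | |
| */ | |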
2939 | ||
2940 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_EXCLUSIVE_LOCK); | |
2941 | if (BTHasContiguousNodes(VTOF(vcb->catalogRefNum)) == 0 || | |
2942 | BTHasContiguousNodes(VTOF(vcb->extentsRefNum)) == 0) { | |
2943 | ||
2944 | printf("hfs: volume has a btree w/non-contiguous nodes. can not enable journaling.\n"); | |
2945 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2946 | return EINVAL; | |
2947 | } | |
2948 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2949 | ||
2950 | // make sure these both exist! | |
2951 | if ( GetFileInfo(vcb, kHFSRootFolderID, ".journal_info_block", &jinfo_attr, &jinfo_fork) == 0 | |
2952 | || GetFileInfo(vcb, kHFSRootFolderID, ".journal", &jnl_attr, &jnl_fork) == 0) { | |
2953 | ||
2954 | return EINVAL; | |
2955 | } | |
2956 | ||
2957 | /* | |
2958 | * At this point, we have a copy of the metadata that lives in the catalog for the | |
2959 | * journal info block. Verify that the journal info block's single extent matches | |
2960 | * the one that was passed into this sysctl. | |
2961 | * | |
2962 | * If it is different, deny the journal enable call. | |
2963 | */ | |
2964 | if (jinfo_fork.cf_blocks > 1) { | |
2965 | /* too many blocks */ | |
2966 | return EINVAL; | |
2967 | } | |
2968 | ||
2969 | if (jinfo_fork.cf_extents[0].startBlock != jib_blkno) { | |
2970 | /* Wrong block */ | |
2971 | return EINVAL; | |
2972 | } | |
2973 | ||
2974 | /* | |
2975 | * We want to immediately purge the vnode for the JIB. | |
2976 | * | |
2977 | * Because it was written to from userland, there's probably | |
2978 | * a vnode somewhere in the vnode cache (possibly with UBC backed blocks). | |
2979 | * So we bring the vnode into core, then immediately do whatever | |
2980 | * we can to flush/vclean it out. This is because those blocks will be | |
2981 | * interpreted as user data, which may be treated differently from metadata | |
2982 | * on some platforms. If the vnode is gone, then there cannot be backing blocks | |
2983 | * in the UBC. | |
2984 | */ | |
2985 | if (hfs_vget (hfsmp, jinfo_attr.ca_fileid, &jib_vp, 1, 0)) { | |
2986 | return EINVAL; | |
2987 | } | |
2988 | /* | |
2989 | * Now we have a vnode for the JIB. Recycle it. Because we hold an iocount | |
2990 | * on the vnode, we'll just mark it for termination when the last iocount | |
2991 | * (hopefully ours) is dropped. | |
2992 | */ | |
2993 | vnode_recycle (jib_vp); | |
2994 | err = vnode_put (jib_vp); | |
2995 | if (err) { | |
2996 | return EINVAL; | |
2997 | } | |
2998 | ||
2999 | /* Initialize the local copy of the JIB (just like hfs.util) */ | |
3000 | memset (&local_jib, 'Z', sizeof(struct JournalInfoBlock)); | |
3001 | local_jib.flags = SWAP_BE32(kJIJournalInFSMask); | |
3002 | /* Note that the JIB's offset is in bytes */ | |
3003 | local_jib.offset = SWAP_BE64(journal_byte_offset); | |
3004 | local_jib.size = SWAP_BE64(journal_size); | |
3005 | ||
3006 | /* | |
3007 | * Now write out the local JIB. This essentially overwrites the userland | |
3008 | * copy of the JIB. Read it as BLK_META to treat it as a metadata read/write. | |
3009 | */ | |
3010 | jib_buf = buf_getblk (hfsmp->hfs_devvp, | |
3011 | jib_blkno * (hfsmp->blockSize / hfsmp->hfs_logical_block_size), | |
3012 | hfsmp->blockSize, 0, 0, BLK_META); | |
3013 | char* buf_ptr = (char*) buf_dataptr (jib_buf); | |
3014 | ||
3015 | /* Zero out the portion of the block that won't contain JIB data */ | |
3016 | memset (buf_ptr, 0, hfsmp->blockSize); | |
3017 | ||
3018 | bcopy(&local_jib, buf_ptr, sizeof(local_jib)); | |
3019 | if (buf_bwrite (jib_buf)) { | |
3020 | return EIO; | |
3021 | } | |
3022 | ||
3023 | /* Force a track cache flush */ | |
3024 | (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context); | |
3025 | ||
3026 | ||
3027 | /* Now proceed with full volume sync */ | |
3028 | hfs_sync(hfsmp->hfs_mp, MNT_WAIT, context); | |
3029 | ||
3030 | printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n", | |
3031 | (off_t)name[2], (off_t)name[3]); | |
3032 | ||
3033 | // | |
3034 | // XXXdbg - note that currently (Sept, 08) hfs_util does not support | |
3035 | // enabling the journal on a separate device so it is safe | |
3036 | // to just copy hfs_devvp here. If hfs_util gets the ability | |
3037 | // to dynamically enable the journal on a separate device then | |
3038 | // we will have to do the same thing as hfs_early_journal_init() | |
3039 | // to locate and open the journal device. | |
3040 | // | |
3041 | jvp = hfsmp->hfs_devvp; | |
3042 | jnl = journal_create(jvp, journal_byte_offset, journal_size, | |
3043 | hfsmp->hfs_devvp, | |
3044 | hfsmp->hfs_logical_block_size, | |
3045 | 0, | |
3046 | 0, | |
3047 | hfs_sync_metadata, hfsmp->hfs_mp, | |
3048 | hfsmp->hfs_mp); | |
3049 | ||
3050 | /* | |
3051 | * Set up the trim callback function so that we can add | |
3052 | * recently freed extents to the free extent cache once | |
3053 | * the transaction that freed them is written to the | |
3054 | * journal on disk. | |
3055 | */ | |
3056 | if (jnl) | |
3057 | journal_trim_set_callback(jnl, hfs_trim_callback, hfsmp); | |
3058 | ||
3059 | if (jnl == NULL) { | |
3060 | printf("hfs: FAILED to create the journal!\n"); | |
3061 | if (jvp && jvp != hfsmp->hfs_devvp) { | |
3062 | vnode_clearmountedon(jvp); | |
3063 | VNOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, vfs_context_kernel()); | |
3064 | } | |
3065 | jvp = NULL; | |
3066 | ||
3067 | return EINVAL; | |
3068 | } | |
3069 | ||
3070 | hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK); | |
3071 | ||
3072 | /* | |
3073 | * Flush all dirty metadata buffers. | |
3074 | */ | |
3075 | buf_flushdirtyblks(hfsmp->hfs_devvp, TRUE, 0, "hfs_sysctl"); | |
3076 | buf_flushdirtyblks(hfsmp->hfs_extents_vp, TRUE, 0, "hfs_sysctl"); | |
3077 | buf_flushdirtyblks(hfsmp->hfs_catalog_vp, TRUE, 0, "hfs_sysctl"); | |
3078 | buf_flushdirtyblks(hfsmp->hfs_allocation_vp, TRUE, 0, "hfs_sysctl"); | |
3079 | if (hfsmp->hfs_attribute_vp) | |
3080 | buf_flushdirtyblks(hfsmp->hfs_attribute_vp, TRUE, 0, "hfs_sysctl"); | |
3081 | ||
3082 | HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1]; | |
3083 | HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask; | |
3084 | hfsmp->jvp = jvp; | |
3085 | hfsmp->jnl = jnl; | |
3086 | ||
3087 | // save this off for the hack-y check in hfs_remove() | |
3088 | hfsmp->jnl_start = (u_int32_t)name[2]; | |
3089 | hfsmp->jnl_size = (off_t)((unsigned)name[3]); | |
3090 | hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid; | |
3091 | hfsmp->hfs_jnlfileid = jnl_attr.ca_fileid; | |
3092 | ||
3093 | vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED)); | |
3094 | ||
3095 | hfs_unlock_global (hfsmp); | |
3096 | hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1); | |
3097 | ||
3098 | { | |
3099 | fsid_t fsid; | |
3100 | ||
3101 | fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev; | |
3102 | fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp)); | |
3103 | vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL); | |
3104 | } | |
3105 | return 0; | |
3106 | } else if (name[0] == HFS_DISABLE_JOURNALING) { | |
3107 | // clear the journaling bit | |
3108 | vnode_t vp = vfs_context_cwd(context); | |
3109 | ||
3110 | /* Only root can disable journaling */ | |
3111 | if (!kauth_cred_issuser(kauth_cred_get())) { | |
3112 | return (EPERM); | |
3113 | } | |
3114 | if (vp == NULLVP) | |
3115 | return EINVAL; | |
3116 | ||
3117 | hfsmp = VTOHFS(vp); | |
3118 | ||
3119 | /* | |
3120 | * Disabling journaling is disallowed on volumes with directory hard links | |
3121 | * because we have not tested the relevant code path. | |
3122 | */ | |
3123 | if (hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries != 0){ | |
3124 | printf("hfs: cannot disable journaling on volumes with directory hardlinks\n"); | |
3125 | return EPERM; | |
3126 | } | |
3127 | ||
3128 | printf("hfs: disabling journaling for mount @ %p\n", vnode_mount(vp)); | |
3129 | ||
3130 | hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK); | |
3131 | ||
3132 | // Lights out for you buddy! | |
3133 | journal_close(hfsmp->jnl); | |
3134 | hfsmp->jnl = NULL; | |
3135 | ||
3136 | if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { | |
3137 | vnode_clearmountedon(hfsmp->jvp); | |
3138 | VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, vfs_context_kernel()); | |
3139 | vnode_put(hfsmp->jvp); | |
3140 | } | |
3141 | hfsmp->jvp = NULL; | |
3142 | vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED)); | |
3143 | hfsmp->jnl_start = 0; | |
3144 | hfsmp->hfs_jnlinfoblkid = 0; | |
3145 | hfsmp->hfs_jnlfileid = 0; | |
3146 | ||
3147 | HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask; | |
3148 | ||
3149 | hfs_unlock_global (hfsmp); | |
3150 | ||
3151 | hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1); | |
3152 | ||
3153 | { | |
3154 | fsid_t fsid; | |
3155 | ||
3156 | fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev; | |
3157 | fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp)); | |
3158 | vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL); | |
3159 | } | |
3160 | return 0; | |
3161 | } else if (name[0] == HFS_GET_JOURNAL_INFO) { | |
3162 | vnode_t vp = vfs_context_cwd(context); | |
3163 | off_t jnl_start, jnl_size; | |
3164 | ||
3165 | if (vp == NULLVP) | |
3166 | return EINVAL; | |
3167 | ||
3168 | /* 64-bit processes won't work with this sysctl -- can't fit a pointer into an int! */ | |
3169 | if (proc_is64bit(current_proc())) | |
3170 | return EINVAL; | |
3171 | ||
3172 | hfsmp = VTOHFS(vp); | |
3173 | if (hfsmp->jnl == NULL) { | |
3174 | jnl_start = 0; | |
3175 | jnl_size = 0; | |
3176 | } else { | |
3177 | jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset; | |
3178 | jnl_size = (off_t)hfsmp->jnl_size; | |
3179 | } | |
3180 | ||
3181 | if ((error = copyout((caddr_t)&jnl_start, CAST_USER_ADDR_T(name[1]), sizeof(off_t))) != 0) { | |
3182 | return error; | |
3183 | } | |
3184 | if ((error = copyout((caddr_t)&jnl_size, CAST_USER_ADDR_T(name[2]), sizeof(off_t))) != 0) { | |
3185 | return error; | |
3186 | } | |
3187 | ||
3188 | return 0; | |
3189 | } else if (name[0] == HFS_SET_PKG_EXTENSIONS) { | |
3190 | ||
3191 | return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]); | |
3192 | ||
3193 | } else if (name[0] == VFS_CTL_QUERY) { | |
3194 | struct sysctl_req *req; | |
3195 | union union_vfsidctl vc; | |
3196 | struct mount *mp; | |
3197 | struct vfsquery vq; | |
3198 | ||
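 | /* | |
 | * Report any pending volume notification conditions (e.g. low disk space) | |
 | * for the requested filesystem back to the caller. | |
 | */ | |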
3199 | req = CAST_DOWN(struct sysctl_req *, oldp); /* we're new style vfs sysctl. */ | |
3200 | if (req == NULL) { | |
3201 | return EFAULT; | |
3202 | } | |
3203 | ||
3204 | error = SYSCTL_IN(req, &vc, proc_is64bit(p)? sizeof(vc.vc64):sizeof(vc.vc32)); | |
3205 | if (error) return (error); | |
3206 | ||
3207 | mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */ | |
3208 | if (mp == NULL) return (ENOENT); | |
3209 | ||
3210 | hfsmp = VFSTOHFS(mp); | |
3211 | bzero(&vq, sizeof(vq)); | |
3212 | vq.vq_flags = hfsmp->hfs_notification_conditions; | |
3213 | return SYSCTL_OUT(req, &vq, sizeof(vq)); | |
3214 | } else if (name[0] == HFS_REPLAY_JOURNAL) { | |
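 | /* name[1] is an open file descriptor for the block device whose journal should be replayed. */ | |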
3215 | vnode_t devvp = NULL; | |
3216 | int device_fd; | |
3217 | if (namelen != 2) { | |
3218 | return (EINVAL); | |
3219 | } | |
3220 | device_fd = name[1]; | |
3221 | error = file_vnode(device_fd, &devvp); | |
3222 | if (error) { | |
3223 | return error; | |
3224 | } | |
3225 | error = vnode_getwithref(devvp); | |
3226 | if (error) { | |
3227 | file_drop(device_fd); | |
3228 | return error; | |
3229 | } | |
3230 | error = hfs_journal_replay(devvp, context); | |
3231 | file_drop(device_fd); | |
3232 | vnode_put(devvp); | |
3233 | return error; | |
3234 | } else if (name[0] == HFS_ENABLE_RESIZE_DEBUG) { | |
3235 | hfs_resize_debug = 1; | |
3236 | printf ("hfs_sysctl: Enabled volume resize debugging.\n"); | |
3237 | return 0; | |
3238 | } | |
3239 | ||
3240 | return (ENOTSUP); | |
3241 | } | |
3242 | ||
3243 | /* | |
3244 | * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support | |
3245 | * the build_path ioctl. We use it to leverage the code below that updates | |
3246 | * the origin list cache if necessary | |
3247 | */ | |
3248 | ||
3249 | int | |
3250 | hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context) | |
3251 | { | |
3252 | int error; | |
3253 | int lockflags; | |
3254 | struct hfsmount *hfsmp; | |
3255 | ||
3256 | hfsmp = VFSTOHFS(mp); | |
3257 | ||
3258 | error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1, 0); | |
3259 | if (error) | |
3260 | return (error); | |
3261 | ||
3262 | /* | |
3263 | * ADLs may need to have their origin state updated | |
3264 | * since build_path needs a valid parent. The same is true | |
3265 | * for hardlinked files as well. There isn't a race window here | |
3266 | * in re-acquiring the cnode lock since we aren't pulling any data | |
3267 | * out of the cnode; instead, we're going to the catalog. | |
3268 | */ | |
3269 | if ((VTOC(*vpp)->c_flag & C_HARDLINK) && | |
3270 | (hfs_lock(VTOC(*vpp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0)) { | |
3271 | cnode_t *cp = VTOC(*vpp); | |
3272 | struct cat_desc cdesc; | |
3273 | ||
3274 | if (!hfs_haslinkorigin(cp)) { | |
3275 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
3276 | error = cat_findname(hfsmp, (cnid_t)ino, &cdesc); | |
3277 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3278 | if (error == 0) { | |
3279 | if ((cdesc.cd_parentcnid != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) && | |
3280 | (cdesc.cd_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid)) { | |
3281 | hfs_savelinkorigin(cp, cdesc.cd_parentcnid); | |
3282 | } | |
3283 | cat_releasedesc(&cdesc); | |
3284 | } | |
3285 | } | |
3286 | hfs_unlock(cp); | |
3287 | } | |
3288 | return (0); | |
3289 | } | |
3290 | ||
3291 | ||
3292 | /* | |
3293 | * Look up an HFS object by ID. | |
3294 | * | |
3295 | * The object is returned with an iocount reference and the cnode locked. | |
3296 | * | |
3297 | * If the object is a file then it will represent the data fork. | |
3298 | */ | |
3299 | int | |
3300 | hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted) | |
3301 | { | |
3302 | struct vnode *vp = NULLVP; | |
3303 | struct cat_desc cndesc; | |
3304 | struct cat_attr cnattr; | |
3305 | struct cat_fork cnfork; | |
3306 | u_int32_t linkref = 0; | |
3307 | int error; | |
3308 | ||
3309 | /* Check for cnids that shouldn't be exported. */ | |
3310 | if ((cnid < kHFSFirstUserCatalogNodeID) && | |
3311 | (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) { | |
3312 | return (ENOENT); | |
3313 | } | |
3314 | /* Don't export our private directories. */ | |
3315 | if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid || | |
3316 | cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) { | |
3317 | return (ENOENT); | |
3318 | } | |
3319 | /* | |
3320 | * Check the hash first | |
3321 | */ | |
3322 | vp = hfs_chash_getvnode(hfsmp, cnid, 0, skiplock, allow_deleted); | |
3323 | if (vp) { | |
3324 | *vpp = vp; | |
3325 | return(0); | |
3326 | } | |
3327 | ||
3328 | bzero(&cndesc, sizeof(cndesc)); | |
3329 | bzero(&cnattr, sizeof(cnattr)); | |
3330 | bzero(&cnfork, sizeof(cnfork)); | |
3331 | ||
3332 | /* | |
3333 | * Not in hash, lookup in catalog | |
3334 | */ | |
3335 | if (cnid == kHFSRootParentID) { | |
3336 | static char hfs_rootname[] = "/"; | |
3337 | ||
3338 | cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0]; | |
3339 | cndesc.cd_namelen = 1; | |
3340 | cndesc.cd_parentcnid = kHFSRootParentID; | |
3341 | cndesc.cd_cnid = kHFSRootFolderID; | |
3342 | cndesc.cd_flags = CD_ISDIR; | |
3343 | ||
3344 | cnattr.ca_fileid = kHFSRootFolderID; | |
3345 | cnattr.ca_linkcount = 1; | |
3346 | cnattr.ca_entries = 1; | |
3347 | cnattr.ca_dircount = 1; | |
3348 | cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO); | |
3349 | } else { | |
3350 | int lockflags; | |
3351 | cnid_t pid; | |
3352 | const char *nameptr; | |
3353 | ||
3354 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
3355 | error = cat_idlookup(hfsmp, cnid, 0, 0, &cndesc, &cnattr, &cnfork); | |
3356 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3357 | ||
3358 | if (error) { | |
3359 | *vpp = NULL; | |
3360 | return (error); | |
3361 | } | |
3362 | ||
3363 | /* | |
3364 | * Check for a raw hardlink inode and save its linkref. | |
3365 | */ | |
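 | /* | |
 | * Raw inodes live in the private metadata directories and are named with | |
 | * a fixed prefix followed by the decimal link reference, which strtoul() | |
 | * extracts below. | |
 | */ | |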
3366 | pid = cndesc.cd_parentcnid; | |
3367 | nameptr = (const char *)cndesc.cd_nameptr; | |
3368 | ||
3369 | if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && | |
3370 | (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) { | |
3371 | linkref = strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10); | |
3372 | ||
3373 | } else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) && | |
3374 | (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) { | |
3375 | linkref = strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10); | |
3376 | ||
3377 | } else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && | |
3378 | (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) { | |
3379 | *vpp = NULL; | |
3380 | cat_releasedesc(&cndesc); | |
3381 | return (ENOENT); /* open unlinked file */ | |
3382 | } | |
3383 | } | |
3384 | ||
3385 | /* | |
3386 | * Finish initializing cnode descriptor for hardlinks. | |
3387 | * | |
3388 | * We need a valid name and parent for reverse lookups. | |
3389 | */ | |
3390 | if (linkref) { | |
3391 | cnid_t lastid; | |
3392 | struct cat_desc linkdesc; | |
3393 | int linkerr = 0; | |
3394 | ||
3395 | cnattr.ca_linkref = linkref; | |
3396 | bzero (&linkdesc, sizeof (linkdesc)); | |
3397 | ||
3398 | /* | |
3399 | * If the caller supplied the raw inode value, then we don't know exactly | |
3400 | * which hardlink they wanted. It's likely that they acquired the raw inode | |
3401 | * value BEFORE the item became a hardlink, in which case they probably | |
3402 | * want the oldest link. So request the oldest link from the catalog. | |
3403 | * | |
3404 | * Unfortunately, this requires that we iterate through all N hardlinks. On the plus | |
3405 | * side, since we know that we want the last linkID, we can also have this one | |
3406 | * call give us back the name of the last ID, since it's going to have it in-hand... | |
3407 | */ | |
3408 | linkerr = hfs_lookup_lastlink (hfsmp, linkref, &lastid, &linkdesc); | |
3409 | if ((linkerr == 0) && (lastid != 0)) { | |
3410 | /* | |
3411 | * Release any lingering buffers attached to our local descriptor. | |
3412 | * Then copy the name and other business into the cndesc | |
3413 | */ | |
3414 | cat_releasedesc (&cndesc); | |
3415 | bcopy (&linkdesc, &cndesc, sizeof(linkdesc)); | |
3416 | } | |
3417 | /* If it failed, the linkref code will just use whatever it had in-hand below. */ | |
3418 | } | |
3419 | ||
3420 | if (linkref) { | |
3421 | int newvnode_flags = 0; | |
3422 | ||
3423 | error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, | |
3424 | &cnfork, &vp, &newvnode_flags); | |
3425 | if (error == 0) { | |
3426 | VTOC(vp)->c_flag |= C_HARDLINK; | |
3427 | vnode_setmultipath(vp); | |
3428 | } | |
3429 | } else { | |
3430 | struct componentname cn; | |
3431 | int newvnode_flags = 0; | |
3432 | ||
3433 | /* Supply hfs_getnewvnode with a component name. */ | |
3434 | MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); | |
3435 | cn.cn_nameiop = LOOKUP; | |
3436 | cn.cn_flags = ISLASTCN | HASBUF; | |
3437 | cn.cn_context = NULL; | |
3438 | cn.cn_pnlen = MAXPATHLEN; | |
3439 | cn.cn_nameptr = cn.cn_pnbuf; | |
3440 | cn.cn_namelen = cndesc.cd_namelen; | |
3441 | cn.cn_hash = 0; | |
3442 | cn.cn_consume = 0; | |
3443 | bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1); | |
3444 | ||
3445 | error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr, | |
3446 | &cnfork, &vp, &newvnode_flags); | |
3447 | ||
3448 | if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK)) { | |
3449 | hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid); | |
3450 | } | |
3451 | FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI); | |
3452 | } | |
3453 | cat_releasedesc(&cndesc); | |
3454 | ||
3455 | *vpp = vp; | |
3456 | if (vp && skiplock) { | |
3457 | hfs_unlock(VTOC(vp)); | |
3458 | } | |
3459 | return (error); | |
3460 | } | |
3461 | ||
3462 | ||
3463 | /* | |
3464 | * Flush out all the files in a filesystem. | |
3465 | */ | |
3466 | static int | |
3467 | #if QUOTA | |
3468 | hfs_flushfiles(struct mount *mp, int flags, struct proc *p) | |
3469 | #else | |
3470 | hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p) | |
3471 | #endif /* QUOTA */ | |
3472 | { | |
3473 | struct hfsmount *hfsmp; | |
3474 | struct vnode *skipvp = NULLVP; | |
3475 | int error; | |
3476 | int accounted_root_usecounts; | |
3477 | #if QUOTA | |
3478 | int i; | |
3479 | #endif | |
3480 | ||
3481 | hfsmp = VFSTOHFS(mp); | |
3482 | ||
3483 | accounted_root_usecounts = 0; | |
3484 | #if QUOTA | |
3485 | /* | |
3486 | * The open quota files have an indirect reference on | |
3487 | * the root directory vnode. We must account for this | |
3488 | * extra reference when doing the initial vflush. | |
3489 | */ | |
3490 | if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) { | |
3491 | /* Find out how many quota files we have open. */ | |
3492 | for (i = 0; i < MAXQUOTAS; i++) { | |
3493 | if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP) | |
3494 | ++accounted_root_usecounts; | |
3495 | } | |
3496 | } | |
3497 | #endif /* QUOTA */ | |
3498 | ||
3499 | if (accounted_root_usecounts > 0) { | |
3500 | /* Obtain the root vnode so we can skip over it. */ | |
3501 | skipvp = hfs_chash_getvnode(hfsmp, kHFSRootFolderID, 0, 0, 0); | |
3502 | } | |
3503 | ||
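 | /* Two vflush passes: both skip system files, but only the first also skips swap files. */ | |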
3504 | error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags); | |
3505 | if (error != 0) | |
3506 | return(error); | |
3507 | ||
3508 | error = vflush(mp, skipvp, SKIPSYSTEM | flags); | |
3509 | ||
3510 | if (skipvp) { | |
3511 | /* | |
3512 | * See if there are additional references on the | |
3513 | * root vp besides the ones obtained from the open | |
3514 | * quota files and CoreStorage. | |
3515 | */ | |
3516 | if ((error == 0) && | |
3517 | (vnode_isinuse(skipvp, accounted_root_usecounts))) { | |
3518 | error = EBUSY; /* root directory is still open */ | |
3519 | } | |
3520 | hfs_unlock(VTOC(skipvp)); | |
3521 | /* release the iocount from the hfs_chash_getvnode call above. */ | |
3522 | vnode_put(skipvp); | |
3523 | } | |
3524 | if (error && (flags & FORCECLOSE) == 0) | |
3525 | return (error); | |
3526 | ||
3527 | #if QUOTA | |
3528 | if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) { | |
3529 | for (i = 0; i < MAXQUOTAS; i++) { | |
3530 | if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP) | |
3531 | continue; | |
3532 | hfs_quotaoff(p, mp, i); | |
3533 | } | |
3534 | } | |
3535 | #endif /* QUOTA */ | |
3536 | ||
3537 | if (skipvp) { | |
3538 | error = vflush(mp, NULLVP, SKIPSYSTEM | flags); | |
3539 | } | |
3540 | ||
3541 | return (error); | |
3542 | } | |
3543 | ||
3544 | /* | |
3545 | * Update volume encoding bitmap (HFS Plus only) | |
3546 | * | |
3547 | * Mark a legacy text encoding as in-use (as needed) | |
3548 | * in the volume header of this HFS+ filesystem. | |
3549 | */ | |
3550 | __private_extern__ | |
3551 | void | |
3552 | hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding) | |
3553 | { | |
3554 | #define kIndexMacUkrainian 48 /* MacUkrainian encoding is 152 */ | |
3555 | #define kIndexMacFarsi 49 /* MacFarsi encoding is 140 */ | |
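 | /* The volume's encodingsBitmap only has 64 bits, so encodings whose | |
 | * values exceed 63 are remapped to reserved index positions. */ | |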
3556 | ||
3557 | u_int32_t index; | |
3558 | ||
3559 | switch (encoding) { | |
3560 | case kTextEncodingMacUkrainian: | |
3561 | index = kIndexMacUkrainian; | |
3562 | break; | |
3563 | case kTextEncodingMacFarsi: | |
3564 | index = kIndexMacFarsi; | |
3565 | break; | |
3566 | default: | |
3567 | index = encoding; | |
3568 | break; | |
3569 | } | |
3570 | ||
3571 | /* Only mark the encoding as in-use if it wasn't already set */ | |
3572 | if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) { | |
3573 | hfs_lock_mount (hfsmp); | |
3574 | hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index); | |
3575 | MarkVCBDirty(hfsmp); | |
3576 | hfs_unlock_mount(hfsmp); | |
3577 | } | |
3578 | } | |
3579 | ||
3580 | /* | |
3581 | * Update volume stats | |
3582 | * | |
3583 | * On journal volumes this will cause a volume header flush | |
3584 | */ | |
3585 | int | |
3586 | hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot) | |
3587 | { | |
3588 | struct timeval tv; | |
3589 | ||
3590 | microtime(&tv); | |
3591 | ||
3592 | hfs_lock_mount (hfsmp); | |
3593 | ||
3594 | MarkVCBDirty(hfsmp); | |
3595 | hfsmp->hfs_mtime = tv.tv_sec; | |
3596 | ||
3597 | switch (op) { | |
3598 | case VOL_UPDATE: | |
3599 | break; | |
3600 | case VOL_MKDIR: | |
3601 | if (hfsmp->hfs_dircount != 0xFFFFFFFF) | |
3602 | ++hfsmp->hfs_dircount; | |
3603 | if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF) | |
3604 | ++hfsmp->vcbNmRtDirs; | |
3605 | break; | |
3606 | case VOL_RMDIR: | |
3607 | if (hfsmp->hfs_dircount != 0) | |
3608 | --hfsmp->hfs_dircount; | |
3609 | if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF) | |
3610 | --hfsmp->vcbNmRtDirs; | |
3611 | break; | |
3612 | case VOL_MKFILE: | |
3613 | if (hfsmp->hfs_filecount != 0xFFFFFFFF) | |
3614 | ++hfsmp->hfs_filecount; | |
3615 | if (inroot && hfsmp->vcbNmFls != 0xFFFF) | |
3616 | ++hfsmp->vcbNmFls; | |
3617 | break; | |
3618 | case VOL_RMFILE: | |
3619 | if (hfsmp->hfs_filecount != 0) | |
3620 | --hfsmp->hfs_filecount; | |
3621 | if (inroot && hfsmp->vcbNmFls != 0xFFFF) | |
3622 | --hfsmp->vcbNmFls; | |
3623 | break; | |
3624 | } | |
3625 | ||
3626 | hfs_unlock_mount (hfsmp); | |
3627 | ||
3628 | if (hfsmp->jnl) { | |
3629 | hfs_flushvolumeheader(hfsmp, 0, 0); | |
3630 | } | |
3631 | ||
3632 | return (0); | |
3633 | } | |
3634 | ||
3635 | ||
3636 | #if CONFIG_HFS_STD | |
3637 | /* HFS Standard MDB flush */ | |
3638 | static int | |
3639 | hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush) | |
3640 | { | |
3641 | ExtendedVCB *vcb = HFSTOVCB(hfsmp); | |
3642 | struct filefork *fp; | |
3643 | HFSMasterDirectoryBlock *mdb; | |
3644 | struct buf *bp = NULL; | |
3645 | int retval; | |
3646 | int sector_size; | |
3647 | ByteCount namelen; | |
3648 | ||
3649 | sector_size = hfsmp->hfs_logical_block_size; | |
3650 | retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sector_size), sector_size, NOCRED, &bp); | |
3651 | if (retval) { | |
3652 | if (bp) | |
3653 | buf_brelse(bp); | |
3654 | return retval; | |
3655 | } | |
3656 | ||
3657 | hfs_lock_mount (hfsmp); | |
3658 | ||
3659 | mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sector_size)); | |
3660 | ||
3661 | mdb->drCrDate = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->hfs_itime))); | |
3662 | mdb->drLsMod = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod))); | |
3663 | mdb->drAtrb = SWAP_BE16 (vcb->vcbAtrb); | |
3664 | mdb->drNmFls = SWAP_BE16 (vcb->vcbNmFls); | |
3665 | mdb->drAllocPtr = SWAP_BE16 (vcb->nextAllocation); | |
3666 | mdb->drClpSiz = SWAP_BE32 (vcb->vcbClpSiz); | |
3667 | mdb->drNxtCNID = SWAP_BE32 (vcb->vcbNxtCNID); | |
3668 | mdb->drFreeBks = SWAP_BE16 (vcb->freeBlocks); | |
3669 | ||
3670 | namelen = strlen((char *)vcb->vcbVN); | |
3671 | retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN); | |
3672 | /* Retry with MacRoman in case that's how it was exported. */ | |
3673 | if (retval) | |
3674 | retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN); | |
3675 | ||
3676 | mdb->drVolBkUp = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbVolBkUp))); | |
3677 | mdb->drWrCnt = SWAP_BE32 (vcb->vcbWrCnt); | |
3678 | mdb->drNmRtDirs = SWAP_BE16 (vcb->vcbNmRtDirs); | |
3679 | mdb->drFilCnt = SWAP_BE32 (vcb->vcbFilCnt); | |
3680 | mdb->drDirCnt = SWAP_BE32 (vcb->vcbDirCnt); | |
3681 | ||
3682 | bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo)); | |
3683 | ||
3684 | fp = VTOF(vcb->extentsRefNum); | |
3685 | mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock); | |
3686 | mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount); | |
3687 | mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock); | |
3688 | mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount); | |
3689 | mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock); | |
3690 | mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount); | |
3691 | mdb->drXTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize); | |
3692 | mdb->drXTClpSiz = SWAP_BE32 (fp->ff_clumpsize); | |
3693 | FTOC(fp)->c_flag &= ~C_MODIFIED; | |
3694 | ||
3695 | fp = VTOF(vcb->catalogRefNum); | |
3696 | mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock); | |
3697 | mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount); | |
3698 | mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock); | |
3699 | mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount); | |
3700 | mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock); | |
3701 | mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount); | |
3702 | mdb->drCTFlSize = SWAP_BE32 (fp->ff_blocks * vcb->blockSize); | |
3703 | mdb->drCTClpSiz = SWAP_BE32 (fp->ff_clumpsize); | |
3704 | FTOC(fp)->c_flag &= ~C_MODIFIED; | |
3705 | ||
3706 | MarkVCBClean( vcb ); | |
3707 | ||
3708 | hfs_unlock_mount (hfsmp); | |
3709 | ||
3710 | /* If requested, flush out the alternate MDB */ | |
3711 | if (altflush) { | |
3712 | struct buf *alt_bp = NULL; | |
3713 | ||
3714 | if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_partition_avh_sector, sector_size, NOCRED, &alt_bp) == 0) { | |
3715 | bcopy(mdb, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sector_size), kMDBSize); | |
3716 | ||
3717 | (void) VNOP_BWRITE(alt_bp); | |
3718 | } else if (alt_bp) | |
3719 | buf_brelse(alt_bp); | |
3720 | } | |
3721 | ||
3722 | if (waitfor != MNT_WAIT) | |
3723 | buf_bawrite(bp); | |
3724 | else | |
3725 | retval = VNOP_BWRITE(bp); | |
3726 | ||
3727 | return (retval); | |
3728 | } | |
3729 | #endif | |
3730 | ||
3731 | /* | |
3732 | * Flush any dirty in-memory mount data to the on-disk | |
3733 | * volume header. | |
3734 | * | |
3735 | * Note: the on-disk volume signature is intentionally | |
3736 | * not flushed since the on-disk "H+" and "HX" signatures | |
3737 | * are always stored in-memory as "H+". | |
3738 | */ | |
3739 | int | |
3740 | hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) | |
3741 | { | |
3742 | ExtendedVCB *vcb = HFSTOVCB(hfsmp); | |
3743 | struct filefork *fp; | |
3744 | HFSPlusVolumeHeader *volumeHeader, *altVH; | |
3745 | int retval; | |
3746 | struct buf *bp, *alt_bp; | |
3747 | int i; | |
3748 | daddr64_t priIDSector; | |
3749 | int critical; | |
3750 | u_int16_t signature; | |
3751 | u_int16_t hfsversion; | |
3752 | daddr64_t avh_sector; | |
3753 | ||
3754 | if (hfsmp->hfs_flags & HFS_READ_ONLY) { | |
3755 | return(0); | |
3756 | } | |
3757 | #if CONFIG_HFS_STD | |
3758 | if (hfsmp->hfs_flags & HFS_STANDARD) { | |
3759 | return hfs_flushMDB(hfsmp, waitfor, altflush); | |
3760 | } | |
3761 | #endif | |
3762 | critical = altflush; | |
3763 | priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + | |
3764 | HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size)); | |
3765 | ||
3766 | if (hfs_start_transaction(hfsmp) != 0) { | |
3767 | return EINVAL; | |
3768 | } | |
3769 | ||
3770 | bp = NULL; | |
3771 | alt_bp = NULL; | |
3772 | ||
3773 | retval = (int)buf_meta_bread(hfsmp->hfs_devvp, | |
3774 | HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys), | |
3775 | hfsmp->hfs_physical_block_size, NOCRED, &bp); | |
3776 | if (retval) { | |
3777 | printf("hfs: err %d reading VH blk (vol=%s)\n", retval, vcb->vcbVN); | |
3778 | goto err_exit; | |
3779 | } | |
3780 | ||
3781 | volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) + | |
3782 | HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size)); | |
3783 | ||
3784 | /* | |
3785 | * Sanity check what we just read. If it's bad, try the alternate | |
3786 | * instead. | |
3787 | */ | |
3788 | signature = SWAP_BE16 (volumeHeader->signature); | |
3789 | hfsversion = SWAP_BE16 (volumeHeader->version); | |
3790 | if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) || | |
3791 | (hfsversion < kHFSPlusVersion) || (hfsversion > 100) || | |
3792 | (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) { | |
3793 | printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d\n", | |
3794 | vcb->vcbVN, signature, hfsversion, | |
3795 | SWAP_BE32 (volumeHeader->blockSize)); | |
3796 | hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED); | |
3797 | ||
3798 | /* Almost always we read AVH relative to the partition size */ | |
3799 | avh_sector = hfsmp->hfs_partition_avh_sector; | |
3800 | ||
3801 | if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) { | |
3802 | /* | |
3803 | * The two altVH offsets do not match --- which means that a smaller file | |
3804 | * system exists in a larger partition. Verify that we have the correct | |
3805 | * alternate volume header sector as per the current partition size. | |
3806 | * The GPT device that we are mounted on top of could have changed sizes | |
3807 | * without us knowing. | |
3808 | * | |
3809 | * We're in a transaction, so it's safe to modify the partition_avh_sector | |
3810 | * field if necessary. | |
3811 | */ | |
3812 | ||
3813 | uint64_t sector_count; | |
3814 | ||
3815 | /* Get underlying device block count */ | |
3816 | if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT, | |
3817 | (caddr_t)§or_count, 0, vfs_context_current()))) { | |
3818 | printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN); | |
3819 | retval = ENXIO; | |
3820 | goto err_exit; | |
3821 | } | |
3822 | ||
3823 | /* Partition size was changed without our knowledge */ | |
3824 | if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) { | |
3825 | hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + | |
3826 | HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count); | |
3827 | /* Note: hfs_fs_avh_sector will remain unchanged */ | |
3828 | printf ("hfs_flushVH: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n", | |
3829 | hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector); | |
3830 | ||
3831 | /* | |
3832 | * We just updated the offset for AVH relative to | |
3833 | * the partition size, so the content of that AVH | |
3834 | * will be invalid. But since we are also maintaining | |
3835 | * a valid AVH relative to the file system size, we | |
3836 | * can fall back to reading it, since the primary VH and | |
3837 | * partition AVH are not valid. | |
3838 | */ | |
3839 | avh_sector = hfsmp->hfs_fs_avh_sector; | |
3840 | } | |
3841 | } | |
3842 | ||
3843 | printf ("hfs: trying alternate (for %s) avh_sector=%qu\n", | |
3844 | (avh_sector == hfsmp->hfs_fs_avh_sector) ? "file system" : "partition", avh_sector); | |
3845 | ||
3846 | if (avh_sector) { | |
3847 | retval = buf_meta_bread(hfsmp->hfs_devvp, | |
3848 | HFS_PHYSBLK_ROUNDDOWN(avh_sector, hfsmp->hfs_log_per_phys), | |
3849 | hfsmp->hfs_physical_block_size, NOCRED, &alt_bp); | |
3850 | if (retval) { | |
3851 | printf("hfs: err %d reading alternate VH (%s)\n", retval, vcb->vcbVN); | |
3852 | goto err_exit; | |
3853 | } | |
3854 | ||
3855 | altVH = (HFSPlusVolumeHeader *)((char *)buf_dataptr(alt_bp) + | |
3856 | HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size)); | |
3857 | signature = SWAP_BE16(altVH->signature); | |
3858 | hfsversion = SWAP_BE16(altVH->version); | |
3859 | ||
3860 | if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) || | |
3861 | (hfsversion < kHFSPlusVersion) || (hfsversion > 100) || | |
3862 | (SWAP_BE32(altVH->blockSize) != vcb->blockSize)) { | |
3863 | printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n", | |
3864 | vcb->vcbVN, signature, hfsversion, | |
3865 | SWAP_BE32(altVH->blockSize)); | |
3866 | retval = EIO; | |
3867 | goto err_exit; | |
3868 | } | |
3869 | ||
3870 | /* The alternate is plausible, so use it. */ | |
3871 | bcopy(altVH, volumeHeader, kMDBSize); | |
3872 | buf_brelse(alt_bp); | |
3873 | alt_bp = NULL; | |
3874 | } else { | |
3875 | /* No alternate VH, nothing more we can do. */ | |
3876 | retval = EIO; | |
3877 | goto err_exit; | |
3878 | } | |
3879 | } | |
3880 | ||
3881 | if (hfsmp->jnl) { | |
3882 | journal_modify_block_start(hfsmp->jnl, bp); | |
3883 | } | |
3884 | ||
3885 | /* | |
3886 | * For embedded HFS+ volumes, update create date if it changed | |
3887 | * (ie from a setattrlist call) | |
3888 | */ | |
3889 | if ((vcb->hfsPlusIOPosOffset != 0) && | |
3890 | (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) { | |
3891 | struct buf *bp2; | |
3892 | HFSMasterDirectoryBlock *mdb; | |
3893 | ||
3894 | retval = (int)buf_meta_bread(hfsmp->hfs_devvp, | |
3895 | HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys), | |
3896 | hfsmp->hfs_physical_block_size, NOCRED, &bp2); | |
3897 | if (retval) { | |
3898 | if (bp2) | |
3899 | buf_brelse(bp2); | |
3900 | retval = 0; | |
3901 | } else { | |
3902 | mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) + | |
3903 | HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size)); | |
3904 | ||
3905 | if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate ) | |
3906 | { | |
3907 | if (hfsmp->jnl) { | |
3908 | journal_modify_block_start(hfsmp->jnl, bp2); | |
3909 | } | |
3910 | ||
3911 | mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate); /* pick up the new create date */ | |
3912 | ||
3913 | if (hfsmp->jnl) { | |
3914 | journal_modify_block_end(hfsmp->jnl, bp2, NULL, NULL); | |
3915 | } else { | |
3916 | (void) VNOP_BWRITE(bp2); /* write out the changes */ | |
3917 | } | |
3918 | } | |
3919 | else | |
3920 | { | |
3921 | buf_brelse(bp2); /* just release it */ | |
3922 | } | |
3923 | } | |
3924 | } | |
3925 | ||
3926 | hfs_lock_mount (hfsmp); | |
3927 | ||
3928 | /* Note: only update the lower 16 bits worth of attributes */ | |
3929 | volumeHeader->attributes = SWAP_BE32 (vcb->vcbAtrb); | |
3930 | volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock); | |
3931 | if (hfsmp->jnl) { | |
3932 | volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion); | |
3933 | } else { | |
3934 | volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion); | |
3935 | } | |
3936 | volumeHeader->createDate = SWAP_BE32 (vcb->localCreateDate); /* volume create date is in local time */ | |
3937 | volumeHeader->modifyDate = SWAP_BE32 (to_hfs_time(vcb->vcbLsMod)); | |
3938 | volumeHeader->backupDate = SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp)); | |
3939 | volumeHeader->fileCount = SWAP_BE32 (vcb->vcbFilCnt); | |
3940 | volumeHeader->folderCount = SWAP_BE32 (vcb->vcbDirCnt); | |
3941 | volumeHeader->totalBlocks = SWAP_BE32 (vcb->totalBlocks); | |
3942 | volumeHeader->freeBlocks = SWAP_BE32 (vcb->freeBlocks); | |
3943 | volumeHeader->nextAllocation = SWAP_BE32 (vcb->nextAllocation); | |
3944 | volumeHeader->rsrcClumpSize = SWAP_BE32 (vcb->vcbClpSiz); | |
3945 | volumeHeader->dataClumpSize = SWAP_BE32 (vcb->vcbClpSiz); | |
3946 | volumeHeader->nextCatalogID = SWAP_BE32 (vcb->vcbNxtCNID); | |
3947 | volumeHeader->writeCount = SWAP_BE32 (vcb->vcbWrCnt); | |
3948 | volumeHeader->encodingsBitmap = SWAP_BE64 (vcb->encodingsBitmap); | |
3949 | ||
3950 | if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) { | |
3951 | bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)); | |
3952 | critical = 1; | |
3953 | } | |
3954 | ||
3955 | /* | |
3956 | * System files are only dirty when altflush is set. | |
3957 | */ | |
3958 | if (altflush == 0) { | |
3959 | goto done; | |
3960 | } | |
3961 | ||
3962 | /* Sync Extents over-flow file meta data */ | |
3963 | fp = VTOF(vcb->extentsRefNum); | |
3964 | if (FTOC(fp)->c_flag & C_MODIFIED) { | |
3965 | for (i = 0; i < kHFSPlusExtentDensity; i++) { | |
3966 | volumeHeader->extentsFile.extents[i].startBlock = | |
3967 | SWAP_BE32 (fp->ff_extents[i].startBlock); | |
3968 | volumeHeader->extentsFile.extents[i].blockCount = | |
3969 | SWAP_BE32 (fp->ff_extents[i].blockCount); | |
3970 | } | |
3971 | volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size); | |
3972 | volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); | |
3973 | volumeHeader->extentsFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); | |
3974 | FTOC(fp)->c_flag &= ~C_MODIFIED; | |
3975 | } | |
3976 | ||
3977 | /* Sync Catalog file meta data */ | |
3978 | fp = VTOF(vcb->catalogRefNum); | |
3979 | if (FTOC(fp)->c_flag & C_MODIFIED) { | |
3980 | for (i = 0; i < kHFSPlusExtentDensity; i++) { | |
3981 | volumeHeader->catalogFile.extents[i].startBlock = | |
3982 | SWAP_BE32 (fp->ff_extents[i].startBlock); | |
3983 | volumeHeader->catalogFile.extents[i].blockCount = | |
3984 | SWAP_BE32 (fp->ff_extents[i].blockCount); | |
3985 | } | |
3986 | volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size); | |
3987 | volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); | |
3988 | volumeHeader->catalogFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); | |
3989 | FTOC(fp)->c_flag &= ~C_MODIFIED; | |
3990 | } | |
3991 | ||
3992 | /* Sync Allocation file meta data */ | |
3993 | fp = VTOF(vcb->allocationsRefNum); | |
3994 | if (FTOC(fp)->c_flag & C_MODIFIED) { | |
3995 | for (i = 0; i < kHFSPlusExtentDensity; i++) { | |
3996 | volumeHeader->allocationFile.extents[i].startBlock = | |
3997 | SWAP_BE32 (fp->ff_extents[i].startBlock); | |
3998 | volumeHeader->allocationFile.extents[i].blockCount = | |
3999 | SWAP_BE32 (fp->ff_extents[i].blockCount); | |
4000 | } | |
4001 | volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size); | |
4002 | volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); | |
4003 | volumeHeader->allocationFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); | |
4004 | FTOC(fp)->c_flag &= ~C_MODIFIED; | |
4005 | } | |
4006 | ||
4007 | /* Sync Attribute file meta data */ | |
4008 | if (hfsmp->hfs_attribute_vp) { | |
4009 | fp = VTOF(hfsmp->hfs_attribute_vp); | |
4010 | for (i = 0; i < kHFSPlusExtentDensity; i++) { | |
4011 | volumeHeader->attributesFile.extents[i].startBlock = | |
4012 | SWAP_BE32 (fp->ff_extents[i].startBlock); | |
4013 | volumeHeader->attributesFile.extents[i].blockCount = | |
4014 | SWAP_BE32 (fp->ff_extents[i].blockCount); | |
4015 | } | |
4016 | FTOC(fp)->c_flag &= ~C_MODIFIED; | |
4017 | volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size); | |
4018 | volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); | |
4019 | volumeHeader->attributesFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); | |
4020 | } | |
4021 | ||
4022 | /* Sync Startup file meta data */ | |
4023 | if (hfsmp->hfs_startup_vp) { | |
4024 | fp = VTOF(hfsmp->hfs_startup_vp); | |
4025 | if (FTOC(fp)->c_flag & C_MODIFIED) { | |
4026 | for (i = 0; i < kHFSPlusExtentDensity; i++) { | |
4027 | volumeHeader->startupFile.extents[i].startBlock = | |
4028 | SWAP_BE32 (fp->ff_extents[i].startBlock); | |
4029 | volumeHeader->startupFile.extents[i].blockCount = | |
4030 | SWAP_BE32 (fp->ff_extents[i].blockCount); | |
4031 | } | |
4032 | volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size); | |
4033 | volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); | |
4034 | volumeHeader->startupFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); | |
4035 | FTOC(fp)->c_flag &= ~C_MODIFIED; | |
4036 | } | |
4037 | } | |
4038 | ||
4039 | done: | |
4040 | MarkVCBClean(hfsmp); | |
4041 | hfs_unlock_mount (hfsmp); | |
4042 | ||
4043 | /* If requested, flush out the alternate volume header */ | |
4044 | if (altflush) { | |
4045 | /* | |
4046 | * If the two altVH offsets do not match, then a smaller file | |
4047 | * system exists in a larger partition. Verify that we have the correct | |
4048 | * alternate volume header sector as per the current partition size. | |
4049 | * The GPT device that we are mounted on top of could have changed sizes | |
4050 | * without us knowing. | |
4051 | * | |
4052 | * We're in a transaction, so it's safe to modify the partition_avh_sector | |
4053 | * field if necessary. | |
4054 | */ | |
4055 | if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) { | |
4056 | uint64_t sector_count; | |
4057 | ||
4058 | /* Get underlying device block count */ | |
4059 | if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT, | |
4060 | (caddr_t)§or_count, 0, vfs_context_current()))) { | |
4061 | printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN); | |
4062 | retval = ENXIO; | |
4063 | goto err_exit; | |
4064 | } | |
4065 | ||
4066 | /* Partition size was changed without our knowledge */ | |
4067 | if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) { | |
4068 | hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + | |
4069 | HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count); | |
4070 | /* Note: hfs_fs_avh_sector will remain unchanged */ | |
4071 | printf ("hfs_flushVH: altflush: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n", | |
4072 | hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector); | |
4073 | } | |
4074 | } | |
4075 | ||
4076 | /* | |
4077 | * First see if we need to write I/O to the "secondary" AVH | |
4078 | * located at FS Size - 1024 bytes, because this one will | |
4079 | * always go into the journal. We put this AVH into the journal | |
4080 | * because even if the filesystem size has shrunk, this LBA should be | |
4081 | * reachable after the partition-size modification has occurred. | |
4082 | * The one where we need to be careful is partitionsize-1024, since the | |
4083 | * partition size could shrink out from under us. | |
4084 | * | |
4085 | * Most of the time this block will not execute. | |
4086 | */ | |
4087 | if ((hfsmp->hfs_fs_avh_sector) && | |
4088 | (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) { | |
4089 | if (buf_meta_bread(hfsmp->hfs_devvp, | |
4090 | HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys), | |
4091 | hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) { | |
4092 | if (hfsmp->jnl) { | |
4093 | journal_modify_block_start(hfsmp->jnl, alt_bp); | |
4094 | } | |
4095 | ||
4096 | bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) + | |
4097 | HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), | |
4098 | kMDBSize); | |
4099 | ||
4100 | if (hfsmp->jnl) { | |
4101 | journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL); | |
4102 | } else { | |
4103 | (void) VNOP_BWRITE(alt_bp); | |
4104 | } | |
4105 | } else if (alt_bp) { | |
4106 | buf_brelse(alt_bp); | |
4107 | } | |
4108 | } | |
4109 | ||
4110 | /* | |
4111 | * Flush out alternate volume header located at 1024 bytes before | |
4112 | * end of the partition as part of journal transaction. In | |
4113 | * most cases, this will be the only alternate volume header | |
4114 | * that we need to worry about because the file system size is | |
4115 | * same as the partition size, therefore hfs_fs_avh_sector is | |
4116 | * same as hfs_partition_avh_sector. This is the "priority" AVH. | |
4117 | * | |
4118 | * However, do not always put this I/O into the journal. If we skipped the | |
4119 | * FS-Size AVH write above, then we will put this I/O into the journal as | |
4120 | * that indicates the two were in sync. However, if the FS size is | |
4121 | * not the same as the partition size, we are tracking two. We don't | |
4122 | * put it in the journal in that case, since if the partition | |
4123 | * size changes between uptimes, and we need to replay the journal, | |
4124 | * this I/O could generate an EIO if during replay it is now trying | |
4125 | * to access blocks beyond the device EOF. | |
4126 | */ | |
4127 | if (hfsmp->hfs_partition_avh_sector) { | |
4128 | if (buf_meta_bread(hfsmp->hfs_devvp, | |
4129 | HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys), | |
4130 | hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) { | |
4131 | ||
4132 | /* only one AVH, put this I/O in the journal. */ | |
4133 | if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) { | |
4134 | journal_modify_block_start(hfsmp->jnl, alt_bp); | |
4135 | } | |
4136 | ||
4137 | bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) + | |
4138 | HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), | |
4139 | kMDBSize); | |
4140 | ||
4141 | /* If journaled and we only have one AVH to track */ | |
4142 | if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) { | |
4143 | journal_modify_block_end (hfsmp->jnl, alt_bp, NULL, NULL); | |
4144 | } else { | |
4145 | /* | |
4146 | * If we don't have a journal or there are two AVH's at the | |
4147 | * moment, then this one doesn't go in the journal. Note that | |
4148 | * this one may generate I/O errors, since the partition | |
4149 | * can be resized behind our backs at any moment and this I/O | |
4150 | * may now appear to be beyond the device EOF. | |
4151 | */ | |
4152 | (void) VNOP_BWRITE(alt_bp); | |
4153 | (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, | |
4154 | NULL, FWRITE, NULL); | |
4155 | } | |
4156 | } else if (alt_bp) { | |
4157 | buf_brelse(alt_bp); | |
4158 | } | |
4159 | } | |
4160 | } | |
4161 | ||
4162 | /* Finish modifying the block for the primary VH */ | |
4163 | if (hfsmp->jnl) { | |
4164 | journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL); | |
4165 | } else { | |
4166 | if (waitfor != MNT_WAIT) { | |
4167 | buf_bawrite(bp); | |
4168 | } else { | |
4169 | retval = VNOP_BWRITE(bp); | |
4170 | /* When critical data changes, flush the device cache */ | |
4171 | if (critical && (retval == 0)) { | |
4172 | (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, | |
4173 | NULL, FWRITE, NULL); | |
4174 | } | |
4175 | } | |
4176 | } | |
4177 | hfs_end_transaction(hfsmp); | |
4178 | ||
4179 | return (retval); | |
4180 | ||
4181 | err_exit: | |
4182 | if (alt_bp) | |
4183 | buf_brelse(alt_bp); | |
4184 | if (bp) | |
4185 | buf_brelse(bp); | |
4186 | hfs_end_transaction(hfsmp); | |
4187 | return retval; | |
4188 | } | |
4189 | ||
4190 | ||
4191 | /* | |
4192 | * Creates a UUID from a unique "name" in the HFS UUID Name space. | |
4193 | * See version 3 UUID. | |
4194 | */ | |
4195 | static void | |
4196 | hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result) | |
4197 | { | |
4198 | MD5_CTX md5c; | |
4199 | uint8_t rawUUID[8]; | |
4200 | ||
4201 | ((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6]; | |
4202 | ((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7]; | |
4203 | ||
4204 | MD5Init( &md5c ); | |
4205 | MD5Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) ); | |
4206 | MD5Update( &md5c, rawUUID, sizeof (rawUUID) ); | |
4207 | MD5Final( result, &md5c ); | |
4208 | ||
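 | /* Stamp the UUID version (3, MD5 name-based) and RFC 4122 variant bits. */ | |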
4209 | result[6] = 0x30 | ( result[6] & 0x0F ); | |
4210 | result[8] = 0x80 | ( result[8] & 0x3F ); | |
4211 | } | |
4212 | ||
4213 | /* | |
4214 | * Get file system attributes. | |
4215 | */ | |
4216 | static int | |
4217 | hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context) | |
4218 | { | |
4219 | #define HFS_ATTR_CMN_VALIDMASK ATTR_CMN_VALIDMASK | |
4220 | #define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST)) | |
4221 | #define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_ACCTIME)) | |
4222 | ||
4223 | ExtendedVCB *vcb = VFSTOVCB(mp); | |
4224 | struct hfsmount *hfsmp = VFSTOHFS(mp); | |
4225 | u_int32_t freeCNIDs; | |
4226 | ||
4227 | int searchfs_on = 0; | |
4228 | int exchangedata_on = 1; | |
4229 | ||
4230 | #if CONFIG_SEARCHFS | |
4231 | searchfs_on = 1; | |
4232 | #endif | |
4233 | ||
4234 | #if CONFIG_PROTECT | |
4235 | if (cp_fs_protected(mp)) { | |
4236 | exchangedata_on = 0; | |
4237 | } | |
4238 | #endif | |
4239 | ||
4240 | freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)hfsmp->vcbNxtCNID; | |
4241 | ||
4242 | VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt); | |
4243 | VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt); | |
4244 | VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt); | |
4245 | VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF); | |
4246 | VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0)); | |
4247 | VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks); | |
4248 | VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0)); | |
4249 | VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1)); | |
4250 | VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize); | |
4251 | /* XXX needs clarification */ | |
4252 | VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1)); | |
4253 | /* Maximum files is constrained by total blocks. */ | |
4254 | VFSATTR_RETURN(fsap, f_files, (u_int64_t)(hfsmp->totalBlocks - 2)); | |
4255 | VFSATTR_RETURN(fsap, f_ffree, MIN((u_int64_t)freeCNIDs, (u_int64_t)hfs_freeblks(hfsmp, 1))); | |
4256 | ||
4257 | fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev; | |
4258 | fsap->f_fsid.val[1] = vfs_typenum(mp); | |
4259 | VFSATTR_SET_SUPPORTED(fsap, f_fsid); | |
4260 | ||
4261 | VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord); | |
4262 | VFSATTR_RETURN(fsap, f_carbon_fsid, 0); | |
4263 | ||
4264 | if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) { | |
4265 | vol_capabilities_attr_t *cap; | |
4266 | ||
4267 | cap = &fsap->f_capabilities; | |
4268 | ||
4269 | if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) { | |
4270 | /* HFS+ & variants */ | |
4271 | cap->capabilities[VOL_CAPABILITIES_FORMAT] = | |
4272 | VOL_CAP_FMT_PERSISTENTOBJECTIDS | | |
4273 | VOL_CAP_FMT_SYMBOLICLINKS | | |
4274 | VOL_CAP_FMT_HARDLINKS | | |
4275 | VOL_CAP_FMT_JOURNAL | | |
4276 | VOL_CAP_FMT_ZERO_RUNS | | |
4277 | (hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) | | |
4278 | (hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) | | |
4279 | VOL_CAP_FMT_CASE_PRESERVING | | |
4280 | VOL_CAP_FMT_FAST_STATFS | | |
4281 | VOL_CAP_FMT_2TB_FILESIZE | | |
4282 | VOL_CAP_FMT_HIDDEN_FILES | | |
4283 | #if HFS_COMPRESSION | |
4284 | VOL_CAP_FMT_PATH_FROM_ID | | |
4285 | VOL_CAP_FMT_DECMPFS_COMPRESSION; | |
4286 | #else | |
4287 | VOL_CAP_FMT_PATH_FROM_ID; | |
4288 | #endif | |
4289 | } | |
4290 | #if CONFIG_HFS_STD | |
4291 | else { | |
4292 | /* HFS standard */ | |
4293 | cap->capabilities[VOL_CAPABILITIES_FORMAT] = | |
4294 | VOL_CAP_FMT_PERSISTENTOBJECTIDS | | |
4295 | VOL_CAP_FMT_CASE_PRESERVING | | |
4296 | VOL_CAP_FMT_FAST_STATFS | | |
4297 | VOL_CAP_FMT_HIDDEN_FILES | | |
4298 | VOL_CAP_FMT_PATH_FROM_ID; | |
4299 | } | |
4300 | #endif | |
4301 | ||
4302 | /* | |
4303 | * The capabilities word in 'cap' tells you whether or not | |
4304 | * this particular filesystem instance has feature X enabled. | |
4305 | */ | |
4306 | ||
4307 | cap->capabilities[VOL_CAPABILITIES_INTERFACES] = | |
4308 | VOL_CAP_INT_ATTRLIST | | |
4309 | VOL_CAP_INT_NFSEXPORT | | |
4310 | VOL_CAP_INT_READDIRATTR | | |
4311 | VOL_CAP_INT_ALLOCATE | | |
4312 | VOL_CAP_INT_VOL_RENAME | | |
4313 | VOL_CAP_INT_ADVLOCK | | |
4314 | VOL_CAP_INT_FLOCK | | |
4315 | #if NAMEDSTREAMS | |
4316 | VOL_CAP_INT_EXTENDED_ATTR | | |
4317 | VOL_CAP_INT_NAMEDSTREAMS; | |
4318 | #else | |
4319 | VOL_CAP_INT_EXTENDED_ATTR; | |
4320 | #endif | |
4321 | ||
4322 | /* HFS may conditionally support searchfs and exchangedata depending on the runtime */ | |
4323 | ||
4324 | if (searchfs_on) { | |
4325 | cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_SEARCHFS; | |
4326 | } | |
4327 | if (exchangedata_on) { | |
4328 | cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXCHANGEDATA; | |
4329 | } | |
4330 | ||
4331 | cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0; | |
4332 | cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0; | |
4333 | ||
4334 | cap->valid[VOL_CAPABILITIES_FORMAT] = | |
4335 | VOL_CAP_FMT_PERSISTENTOBJECTIDS | | |
4336 | VOL_CAP_FMT_SYMBOLICLINKS | | |
4337 | VOL_CAP_FMT_HARDLINKS | | |
4338 | VOL_CAP_FMT_JOURNAL | | |
4339 | VOL_CAP_FMT_JOURNAL_ACTIVE | | |
4340 | VOL_CAP_FMT_NO_ROOT_TIMES | | |
4341 | VOL_CAP_FMT_SPARSE_FILES | | |
4342 | VOL_CAP_FMT_ZERO_RUNS | | |
4343 | VOL_CAP_FMT_CASE_SENSITIVE | | |
4344 | VOL_CAP_FMT_CASE_PRESERVING | | |
4345 | VOL_CAP_FMT_FAST_STATFS | | |
4346 | VOL_CAP_FMT_2TB_FILESIZE | | |
4347 | VOL_CAP_FMT_OPENDENYMODES | | |
4348 | VOL_CAP_FMT_HIDDEN_FILES | | |
4349 | #if HFS_COMPRESSION | |
4350 | VOL_CAP_FMT_PATH_FROM_ID | | |
4351 | VOL_CAP_FMT_DECMPFS_COMPRESSION; | |
4352 | #else | |
4353 | VOL_CAP_FMT_PATH_FROM_ID; | |
4354 | #endif | |
4355 | ||
4356 | /* | |
4357 | * Bits in the "valid" field tell you whether or not the on-disk | |
4358 | * format supports feature X. | |
4359 | */ | |
4360 | ||
4361 | cap->valid[VOL_CAPABILITIES_INTERFACES] = | |
4362 | VOL_CAP_INT_ATTRLIST | | |
4363 | VOL_CAP_INT_NFSEXPORT | | |
4364 | VOL_CAP_INT_READDIRATTR | | |
4365 | VOL_CAP_INT_COPYFILE | | |
4366 | VOL_CAP_INT_ALLOCATE | | |
4367 | VOL_CAP_INT_VOL_RENAME | | |
4368 | VOL_CAP_INT_ADVLOCK | | |
4369 | VOL_CAP_INT_FLOCK | | |
4370 | VOL_CAP_INT_MANLOCK | | |
4371 | #if NAMEDSTREAMS | |
4372 | VOL_CAP_INT_EXTENDED_ATTR | | |
4373 | VOL_CAP_INT_NAMEDSTREAMS; | |
4374 | #else | |
4375 | VOL_CAP_INT_EXTENDED_ATTR; | |
4376 | #endif | |
4377 | ||
4378 | /* The HFS on-disk format natively supports searchfs and exchangedata */ | |
4379 | cap->valid[VOL_CAPABILITIES_INTERFACES] |= (VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_EXCHANGEDATA); | |
4380 | ||
4381 | ||
4382 | cap->valid[VOL_CAPABILITIES_RESERVED1] = 0; | |
4383 | cap->valid[VOL_CAPABILITIES_RESERVED2] = 0; | |
4384 | VFSATTR_SET_SUPPORTED(fsap, f_capabilities); | |
4385 | } | |
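/*
 * Illustrative only: a minimal userspace sketch of how a caller might read
 * the capability bits published above, using getattrlist(2) with
 * ATTR_VOL_CAPABILITIES. The helper name and mount-point argument are
 * hypothetical; error handling is reduced to the bare minimum.
 */
#if 0	/* userspace sketch, not compiled as part of this file */
#include <sys/attr.h>
#include <unistd.h>
#include <stdio.h>

struct cap_reply {
	u_int32_t               length;		/* filled in by getattrlist() */
	vol_capabilities_attr_t caps;
} __attribute__((aligned(4), packed));

static void
print_journal_state(const char *mntpt)		/* hypothetical helper */
{
	struct attrlist al = {
		.bitmapcount = ATTR_BIT_MAP_COUNT,
		.volattr     = ATTR_VOL_INFO | ATTR_VOL_CAPABILITIES,
	};
	struct cap_reply r;

	if (getattrlist(mntpt, &al, &r, sizeof(r), 0) != 0)
		return;

	u_int32_t fmt   = r.caps.capabilities[VOL_CAPABILITIES_FORMAT];
	u_int32_t valid = r.caps.valid[VOL_CAPABILITIES_FORMAT];

	/* A capability bit is meaningful only if the same bit is set in 'valid'. */
	if ((valid & VOL_CAP_FMT_JOURNAL) && (fmt & VOL_CAP_FMT_JOURNAL))
		printf("volume format supports journaling\n");
	if ((valid & VOL_CAP_FMT_JOURNAL_ACTIVE) && (fmt & VOL_CAP_FMT_JOURNAL_ACTIVE))
		printf("journal is currently active\n");
}
#endif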
4386 | if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) { | |
4387 | vol_attributes_attr_t *attrp = &fsap->f_attributes; | |
4388 | ||
4389 | attrp->validattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK; | |
4390 | attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO; | |
4391 | attrp->validattr.dirattr = ATTR_DIR_VALIDMASK; | |
4392 | attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK; | |
4393 | attrp->validattr.forkattr = 0; | |
4394 | ||
4395 | attrp->nativeattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK; | |
4396 | attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO; | |
4397 | attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK; | |
4398 | attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK; | |
4399 | attrp->nativeattr.forkattr = 0; | |
4400 | VFSATTR_SET_SUPPORTED(fsap, f_attributes); | |
4401 | } | |
4402 | fsap->f_create_time.tv_sec = hfsmp->hfs_itime; | |
4403 | fsap->f_create_time.tv_nsec = 0; | |
4404 | VFSATTR_SET_SUPPORTED(fsap, f_create_time); | |
4405 | fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod; | |
4406 | fsap->f_modify_time.tv_nsec = 0; | |
4407 | VFSATTR_SET_SUPPORTED(fsap, f_modify_time); | |
4408 | ||
4409 | fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp; | |
4410 | fsap->f_backup_time.tv_nsec = 0; | |
4411 | VFSATTR_SET_SUPPORTED(fsap, f_backup_time); | |
4412 | if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) { | |
4413 | u_int16_t subtype = 0; | |
4414 | ||
4415 | /* | |
4416 | * Subtypes (flavors) for HFS | |
4417 | * 0: Mac OS Extended | |
4418 | * 1: Mac OS Extended (Journaled) | |
4419 | * 2: Mac OS Extended (Case Sensitive) | |
4420 | * 3: Mac OS Extended (Case Sensitive, Journaled) | |
4421 | * 4 - 127: Reserved | |
4422 | * 128: Mac OS Standard | |
4423 | * (A userspace sketch of reading the subtype via statfs(2) follows this block.) | |
4424 | */ | |
4425 | if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) { | |
4426 | if (hfsmp->jnl) { | |
4427 | subtype |= HFS_SUBTYPE_JOURNALED; | |
4428 | } | |
4429 | if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) { | |
4430 | subtype |= HFS_SUBTYPE_CASESENSITIVE; | |
4431 | } | |
4432 | } | |
4433 | #if CONFIG_HFS_STD | |
4434 | else { | |
4435 | subtype = HFS_SUBTYPE_STANDARDHFS; | |
4436 | } | |
4437 | #endif | |
4438 | fsap->f_fssubtype = subtype; | |
4439 | VFSATTR_SET_SUPPORTED(fsap, f_fssubtype); | |
4440 | } | |
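/*
 * Illustrative only: a minimal userspace sketch of reading the subtype set
 * above through statfs(2), assuming the 64-bit-inode variant of struct statfs
 * (which carries f_fssubtype). The helper name and mount-point argument are
 * hypothetical.
 */
#if 0	/* userspace sketch, not compiled as part of this file */
#include <sys/param.h>
#include <sys/mount.h>
#include <stdio.h>

static void
print_hfs_flavor(const char *mntpt)		/* hypothetical helper */
{
	struct statfs sfs;

	if (statfs(mntpt, &sfs) != 0)
		return;

	switch (sfs.f_fssubtype) {
	case 0:   printf("Mac OS Extended\n"); break;
	case 1:   printf("Mac OS Extended (Journaled)\n"); break;
	case 2:   printf("Mac OS Extended (Case Sensitive)\n"); break;
	case 3:   printf("Mac OS Extended (Case Sensitive, Journaled)\n"); break;
	case 128: printf("Mac OS Standard\n"); break;
	default:  printf("reserved subtype %u\n", sfs.f_fssubtype); break;
	}
}
#endif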
4441 | ||
4442 | if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) { | |
4443 | strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN); | |
4444 | VFSATTR_SET_SUPPORTED(fsap, f_vol_name); | |
4445 | } | |
4446 | if (VFSATTR_IS_ACTIVE(fsap, f_uuid)) { | |
4447 | hfs_getvoluuid(hfsmp, fsap->f_uuid); | |
4448 | VFSATTR_SET_SUPPORTED(fsap, f_uuid); | |
4449 | } | |
4450 | return (0); | |
4451 | } | |
4452 | ||
4453 | /* | |
4454 | * Perform a volume rename. Requires the FS' root vp. | |
4455 | */ | |
4456 | static int | |
4457 | hfs_rename_volume(struct vnode *vp, const char *name, proc_t p) | |
4458 | { | |
4459 | ExtendedVCB *vcb = VTOVCB(vp); | |
4460 | struct cnode *cp = VTOC(vp); | |
4461 | struct hfsmount *hfsmp = VTOHFS(vp); | |
4462 | struct cat_desc to_desc; | |
4463 | struct cat_desc todir_desc; | |
4464 | struct cat_desc new_desc; | |
4465 | cat_cookie_t cookie; | |
4466 | int lockflags; | |
4467 | int error = 0; | |
4468 | char converted_volname[256]; | |
4469 | size_t volname_length = 0; | |
4470 | size_t conv_volname_length = 0; | |
4471 | ||
4472 | ||
4473 | /* | |
4474 | * Ignore attempts to rename a volume to a zero-length name. | |
4475 | */ | |
4476 | if (name[0] == 0) | |
4477 | return(0); | |
4478 | ||
4479 | bzero(&to_desc, sizeof(to_desc)); | |
4480 | bzero(&todir_desc, sizeof(todir_desc)); | |
4481 | bzero(&new_desc, sizeof(new_desc)); | |
4482 | bzero(&cookie, sizeof(cookie)); | |
4483 | ||
4484 | todir_desc.cd_parentcnid = kHFSRootParentID; | |
4485 | todir_desc.cd_cnid = kHFSRootFolderID; | |
4486 | todir_desc.cd_flags = CD_ISDIR; | |
4487 | ||
4488 | to_desc.cd_nameptr = (const u_int8_t *)name; | |
4489 | to_desc.cd_namelen = strlen(name); | |
4490 | to_desc.cd_parentcnid = kHFSRootParentID; | |
4491 | to_desc.cd_cnid = cp->c_cnid; | |
4492 | to_desc.cd_flags = CD_ISDIR; | |
4493 | ||
4494 | if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) { | |
4495 | if ((error = hfs_start_transaction(hfsmp)) == 0) { | |
4496 | if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) { | |
4497 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); | |
4498 | ||
4499 | error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc); | |
4500 | ||
4501 | /* | |
4502 | * If successful, update the name in the VCB and ensure it's NUL-terminated. | |
4503 | */ | |
4504 | if (error == 0) { | |
4505 | strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN)); | |
4506 | ||
4507 | volname_length = strlen ((const char*)vcb->vcbVN); | |
4508 | #define DKIOCCSSETLVNAME _IOW('d', 198, char[256]) | |
4509 | /* Send the volume name down to CoreStorage if necessary */ | |
4510 | error = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t*)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED); | |
4511 | if (error == 0) { | |
4512 | (void) VNOP_IOCTL (hfsmp->hfs_devvp, DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current()); | |
4513 | } | |
4514 | error = 0; | |
4515 | } | |
4516 | ||
4517 | hfs_systemfile_unlock(hfsmp, lockflags); | |
4518 | cat_postflight(hfsmp, &cookie, p); | |
4519 | ||
4520 | if (error) | |
4521 | MarkVCBDirty(vcb); | |
4522 | (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); | |
4523 | } | |
4524 | hfs_end_transaction(hfsmp); | |
4525 | } | |
4526 | if (!error) { | |
4527 | /* Release old allocated name buffer */ | |
4528 | if (cp->c_desc.cd_flags & CD_HASBUF) { | |
4529 | const char *tmp_name = (const char *)cp->c_desc.cd_nameptr; | |
4530 | ||
4531 | cp->c_desc.cd_nameptr = 0; | |
4532 | cp->c_desc.cd_namelen = 0; | |
4533 | cp->c_desc.cd_flags &= ~CD_HASBUF; | |
4534 | vfs_removename(tmp_name); | |
4535 | } | |
4536 | /* Update cnode's catalog descriptor */ | |
4537 | replace_desc(cp, &new_desc); | |
4538 | vcb->volumeNameEncodingHint = new_desc.cd_encoding; | |
4539 | cp->c_touch_chgtime = TRUE; | |
4540 | } | |
4541 | ||
4542 | hfs_unlock(cp); | |
4543 | } | |
4544 | ||
4545 | return(error); | |
4546 | } | |
4547 | ||
4548 | /* | |
4549 | * Set file system attributes. (A userspace sketch of the volume-rename path follows this function.) | |
4550 | */ | |
4551 | static int | |
4552 | hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, vfs_context_t context) | |
4553 | { | |
4554 | kauth_cred_t cred = vfs_context_ucred(context); | |
4555 | int error = 0; | |
4556 | ||
4557 | /* | |
4558 | * Must be superuser or owner of filesystem to change volume attributes | |
4559 | */ | |
4560 | if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner)) | |
4561 | return(EACCES); | |
4562 | ||
4563 | if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) { | |
4564 | vnode_t root_vp; | |
4565 | ||
4566 | error = hfs_vfs_root(mp, &root_vp, context); | |
4567 | if (error) | |
4568 | goto out; | |
4569 | ||
4570 | error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context)); | |
4571 | (void) vnode_put(root_vp); | |
4572 | if (error) | |
4573 | goto out; | |
4574 | ||
4575 | VFSATTR_SET_SUPPORTED(fsap, f_vol_name); | |
4576 | } | |
4577 | ||
4578 | out: | |
4579 | return error; | |
4580 | } | |
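/*
 * Illustrative only: a minimal userspace sketch of the volume-rename path
 * that ends up in hfs_vfs_setattr()/hfs_rename_volume(), using setattrlist(2)
 * with ATTR_VOL_INFO | ATTR_VOL_NAME. The helper name and its arguments are
 * hypothetical.
 */
#if 0	/* userspace sketch, not compiled as part of this file */
#include <sys/attr.h>
#include <string.h>
#include <unistd.h>

static int
rename_volume(const char *mntpt, const char *newname)	/* hypothetical helper */
{
	struct {
		attrreference_t ref;
		char            name[256];
	} __attribute__((aligned(4), packed)) buf;

	struct attrlist al = {
		.bitmapcount = ATTR_BIT_MAP_COUNT,
		.volattr     = ATTR_VOL_INFO | ATTR_VOL_NAME,
	};

	buf.ref.attr_dataoffset = sizeof(attrreference_t);
	buf.ref.attr_length     = (u_int32_t)strlen(newname) + 1;
	strlcpy(buf.name, newname, sizeof(buf.name));

	/* A zero-length name is silently ignored by hfs_rename_volume(). */
	return setattrlist(mntpt, &al, &buf, sizeof(buf), 0);
}
#endif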
4581 | ||
4582 | /* If runtime corruption is detected, set the volume inconsistent | |
4583 | * bit in the volume attributes. The volume inconsistent bit is a persistent | |
4584 | * bit which indicates that the volume is corrupt and needs repair. | |
4585 | * It can be set by the kernel when it detects runtime corruption, or by file | |
4586 | * system repair utilities like fsck_hfs when a repair operation fails. The | |
4587 | * bit should be cleared only by a verify/repair utility like fsck_hfs when | |
4588 | * a verify/repair succeeds. (An illustrative check of this bit follows the function below.) | |
4589 | */ | |
4590 | __private_extern__ | |
4591 | void hfs_mark_inconsistent(struct hfsmount *hfsmp, | |
4592 | hfs_inconsistency_reason_t reason) | |
4593 | { | |
4594 | hfs_lock_mount (hfsmp); | |
4595 | if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) { | |
4596 | hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask; | |
4597 | MarkVCBDirty(hfsmp); | |
4598 | } | |
4599 | if ((hfsmp->hfs_flags & HFS_READ_ONLY)==0) { | |
4600 | switch (reason) { | |
4601 | case HFS_INCONSISTENCY_DETECTED: | |
4602 | printf("hfs_mark_inconsistent: Runtime corruption detected on %s, fsck will be forced on next mount.\n", | |
4603 | hfsmp->vcbVN); | |
4604 | break; | |
4605 | case HFS_ROLLBACK_FAILED: | |
4606 | printf("hfs_mark_inconsistent: Failed to roll back; volume `%s' might be inconsistent; fsck will be forced on next mount.\n", | |
4607 | hfsmp->vcbVN); | |
4608 | break; | |
4609 | case HFS_OP_INCOMPLETE: | |
4610 | printf("hfs_mark_inconsistent: Failed to complete operation; volume `%s' might be inconsistent; fsck will be forced on next mount.\n", | |
4611 | hfsmp->vcbVN); | |
4612 | break; | |
4613 | case HFS_FSCK_FORCED: | |
4614 | printf("hfs_mark_inconsistent: fsck requested for `%s'; fsck will be forced on next mount.\n", | |
4615 | hfsmp->vcbVN); | |
4616 | break; | |
4617 | } | |
4618 | } | |
4619 | hfs_unlock_mount (hfsmp); | |
4620 | } | |
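/*
 * Illustrative only: a hypothetical check of the persistent bit that
 * hfs_mark_inconsistent() sets. This is not lifted from this file; it merely
 * shows how other code could test kHFSVolumeInconsistentMask to decide that
 * the volume still needs fsck_hfs.
 */
#if 0	/* sketch only, not compiled as part of this file */
	if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
		printf("hfs: volume \"%s\" is marked inconsistent; run fsck_hfs\n",
		       hfsmp->vcbVN);
	}
#endif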
4621 | ||
4622 | /* Replay the journal on the device node provided. Returns zero if | |
4623 | * journal replay succeeded or no journal was supposed to be replayed. | |
4624 | */ | |
4625 | static int hfs_journal_replay(vnode_t devvp, vfs_context_t context) | |
4626 | { | |
4627 | int retval = 0; | |
4628 | int error = 0; | |
4629 | struct mount *mp = NULL; | |
4630 | struct hfs_mount_args *args = NULL; | |
4631 | ||
4632 | /* Replay is allowed only on character or block device nodes */ | |
4633 | if (!vnode_ischr(devvp) && !vnode_isblk(devvp)) { | |
4634 | retval = EINVAL; | |
4635 | goto out; | |
4636 | } | |
4637 | ||
4638 | /* Create dummy mount structures */ | |
4639 | MALLOC(mp, struct mount *, sizeof(struct mount), M_TEMP, M_WAITOK); | |
4640 | if (mp == NULL) { | |
4641 | retval = ENOMEM; | |
4642 | goto out; | |
4643 | } | |
4644 | bzero(mp, sizeof(struct mount)); | |
4645 | mount_lock_init(mp); | |
4646 | ||
4647 | MALLOC(args, struct hfs_mount_args *, sizeof(struct hfs_mount_args), M_TEMP, M_WAITOK); | |
4648 | if (args == NULL) { | |
4649 | retval = ENOMEM; | |
4650 | goto out; | |
4651 | } | |
4652 | bzero(args, sizeof(struct hfs_mount_args)); | |
4653 | ||
4654 | retval = hfs_mountfs(devvp, mp, args, 1, context); | |
4655 | buf_flushdirtyblks(devvp, TRUE, 0, "hfs_journal_replay"); | |
4656 | ||
4657 | /* FSYNC the devnode to be sure all data has been flushed */ | |
4658 | error = VNOP_FSYNC(devvp, MNT_WAIT, context); | |
4659 | if (error) { | |
4660 | retval = error; | |
4661 | } | |
4662 | ||
4663 | out: | |
4664 | if (mp) { | |
4665 | mount_lock_destroy(mp); | |
4666 | FREE(mp, M_TEMP); | |
4667 | } | |
4668 | if (args) { | |
4669 | FREE(args, M_TEMP); | |
4670 | } | |
4671 | return retval; | |
4672 | } | |
4673 | ||
4674 | ||
4675 | /* | |
4676 | * Cancel the syncer, wait for any outstanding request to finish, and free it | |
4677 | */ | |
4678 | static void | |
4679 | hfs_syncer_free(struct hfsmount *hfsmp) | |
4680 | { | |
4681 | if (hfsmp && hfsmp->hfs_syncer) { | |
4682 | hfs_syncer_lock(hfsmp); | |
4683 | ||
4684 | /* | |
4685 | * First, make sure everything else knows we don't want any more | |
4686 | * requests queued. | |
4687 | */ | |
4688 | thread_call_t syncer = hfsmp->hfs_syncer; | |
4689 | hfsmp->hfs_syncer = NULL; | |
4690 | ||
4691 | hfs_syncer_unlock(hfsmp); | |
4692 | ||
4693 | // Now deal with requests that are outstanding | |
4694 | if (hfsmp->hfs_sync_incomplete) { | |
4695 | if (thread_call_cancel(syncer)) { | |
4696 | // We managed to cancel the timer so we're done | |
4697 | hfsmp->hfs_sync_incomplete = FALSE; | |
4698 | } else { | |
4699 | // Syncer must be running right now so we have to wait | |
4700 | hfs_syncer_lock(hfsmp); | |
4701 | while (hfsmp->hfs_sync_incomplete) | |
4702 | hfs_syncer_wait(hfsmp); | |
4703 | hfs_syncer_unlock(hfsmp); | |
4704 | } | |
4705 | } | |
4706 | ||
4707 | // Now we're safe to free the syncer | |
4708 | thread_call_free(syncer); | |
4709 | } | |
4710 | } | |
4711 | ||
4712 | /* | |
4713 | * hfs vfs operations. | |
4714 | */ | |
4715 | struct vfsops hfs_vfsops = { | |
4716 | hfs_mount, | |
4717 | hfs_start, | |
4718 | hfs_unmount, | |
4719 | hfs_vfs_root, | |
4720 | hfs_quotactl, | |
4721 | hfs_vfs_getattr, /* was hfs_statfs */ | |
4722 | hfs_sync, | |
4723 | hfs_vfs_vget, | |
4724 | hfs_fhtovp, | |
4725 | hfs_vptofh, | |
4726 | hfs_init, | |
4727 | hfs_sysctl, | |
4728 | hfs_vfs_setattr, | |
4729 | {NULL} | |
4730 | }; |