1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1989, 1991, 1993, 1994
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
56 */
57
58 #include <rev_endian_fs.h>
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/namei.h>
62 #include <sys/proc.h>
63 #include <sys/kernel.h>
64 #include <sys/vnode.h>
65 #include <sys/socket.h>
66 #include <sys/mount.h>
67 #include <sys/buf.h>
68 #include <sys/mbuf.h>
69 #include <sys/file.h>
70 #include <sys/disk.h>
71 #include <sys/ioctl.h>
72 #include <sys/errno.h>
73 #include <sys/malloc.h>
74 #include <sys/ubc.h>
75 #include <sys/quota.h>
76
77 #include <miscfs/specfs/specdev.h>
78
79 #include <ufs/ufs/quota.h>
80 #include <ufs/ufs/ufsmount.h>
81 #include <ufs/ufs/inode.h>
82 #include <ufs/ufs/ufs_extern.h>
83
84 #include <ufs/ffs/fs.h>
85 #include <ufs/ffs/ffs_extern.h>
86 #if REV_ENDIAN_FS
87 #include <ufs/ufs/ufs_byte_order.h>
88 #include <architecture/byte_order.h>
89 #endif /* REV_ENDIAN_FS */
90
91 int ffs_sbupdate __P((struct ufsmount *, int));
92
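/*
 * VFS dispatch table for FFS: the generic ufs_* routines are used
 * where FFS needs nothing special (start, root, quotactl), and the
 * ffs_* routines below provide the rest.
 */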
93 struct vfsops ufs_vfsops = {
94 ffs_mount,
95 ufs_start,
96 ffs_unmount,
97 ufs_root,
98 ufs_quotactl,
99 ffs_statfs,
100 ffs_sync,
101 ffs_vget,
102 ffs_fhtovp,
103 ffs_vptofh,
104 ffs_init,
105 ffs_sysctl,
106 };
107
108 extern u_long nextgennumber;
109
110 /*
111 * Called by main() when ufs is going to be mounted as root.
112 */
113 ffs_mountroot()
114 {
115 extern struct vnode *rootvp;
116 struct fs *fs;
117 struct mount *mp;
118 struct proc *p = current_proc(); /* XXX */
119 struct ufsmount *ump;
120 u_int size;
121 int error;
122
123 /*
124 * Get vnode for rootdev.
125 */
126 if (error = bdevvp(rootdev, &rootvp)) {
127 printf("ffs_mountroot: can't setup bdevvp");
128 return (error);
129 }
130 if (error = vfs_rootmountalloc("ufs", "root_device", &mp)) {
131 vrele(rootvp); /* release the reference from bdevvp() */
132 return (error);
133 }
134
135 /* Must set the MNT_ROOTFS flag before doing the actual mount */
136 mp->mnt_flag |= MNT_ROOTFS;
137
138 /* Set asynchronous flag by default */
139 mp->mnt_flag |= MNT_ASYNC;
140
141 if (error = ffs_mountfs(rootvp, mp, p)) {
142 mp->mnt_vfc->vfc_refcount--;
143
144 if (mp->mnt_kern_flag & MNTK_IO_XINFO)
145 FREE(mp->mnt_xinfo_ptr, M_TEMP);
146 vfs_unbusy(mp, p);
147
148 vrele(rootvp); /* release the reference from bdevvp() */
149 FREE_ZONE(mp, sizeof (struct mount), M_MOUNT);
150 return (error);
151 }
152 simple_lock(&mountlist_slock);
153 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
154 simple_unlock(&mountlist_slock);
155 ump = VFSTOUFS(mp);
156 fs = ump->um_fs;
157 (void) copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
158 (void)ffs_statfs(mp, &mp->mnt_stat, p);
159 vfs_unbusy(mp, p);
160 inittodr(fs->fs_time);
161 return (0);
162 }
163
164 /*
165 * VFS Operations.
166 *
167 * mount system call
168 */
169 int
170 ffs_mount(mp, path, data, ndp, p)
171 register struct mount *mp;
172 char *path;
173 caddr_t data;
174 struct nameidata *ndp;
175 struct proc *p;
176 {
177 struct vnode *devvp;
178 struct ufs_args args;
179 struct ufsmount *ump;
180 register struct fs *fs;
181 u_int size;
182 int error, flags;
183 mode_t accessmode;
184 int ronly;
185 int reload = 0;
186
187 if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
188 return (error);
189 /*
190 * If updating, check whether changing from read-only to
191 * read/write; if there is no device name, that's all we do.
192 */
193 if (mp->mnt_flag & MNT_UPDATE) {
194 ump = VFSTOUFS(mp);
195 fs = ump->um_fs;
196 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
197 flags = WRITECLOSE;
198 if (mp->mnt_flag & MNT_FORCE)
199 flags |= FORCECLOSE;
200 if (error = ffs_flushfiles(mp, flags, p))
201 return (error);
202 fs->fs_clean = 1;
203 fs->fs_ronly = 1;
204 if (error = ffs_sbupdate(ump, MNT_WAIT)) {
205 fs->fs_clean = 0;
206 fs->fs_ronly = 0;
207 return (error);
208 }
209 }
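/*
 * ffs_reload() copies the on-disk superblock over the in-core one,
 * overwriting fields such as fs_ronly, so save it here and put it
 * back once the (possible) reload is done.
 */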
210 /* save fs_ronly for later use */
211 ronly = fs->fs_ronly;
212 if ((mp->mnt_flag & MNT_RELOAD) || ronly)
213 reload = 1;
214 if ((reload) &&
215 (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)))
216 return (error);
217 /* restore fs_ronly after the reload */
218 fs->fs_ronly = ronly;
219 /*
220 * Do not upgrade the file system to read/write if the system came
221 * up single-user and the user tries to mount -uw without fscking.
222 */
223 if (!fs->fs_clean && ronly) {
224 printf("WARNING: trying to mount a dirty file system\n");
225 if (issingleuser() && (mp->mnt_flag & MNT_ROOTFS)) {
226 printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",fs->fs_fsmnt);
227 /*
228 * Reset the readonly bit as reload might have
229 * modified this bit
230 */
231 fs->fs_ronly = 1;
232 return(EPERM);
233 }
234 }
235
236 if (ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
237 /*
238 * If upgrade to read-write by non-root, then verify
239 * that user has necessary permissions on the device.
240 */
241 if (p->p_ucred->cr_uid != 0) {
242 devvp = ump->um_devvp;
243 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
244 if (error = VOP_ACCESS(devvp, VREAD | VWRITE,
245 p->p_ucred, p)) {
246 VOP_UNLOCK(devvp, 0, p);
247 return (error);
248 }
249 VOP_UNLOCK(devvp, 0, p);
250 }
251 fs->fs_ronly = 0;
252 fs->fs_clean = 0;
253 (void) ffs_sbupdate(ump, MNT_WAIT);
254 }
255 if (args.fspec == 0) {
256 /*
257 * Process export requests.
258 */
259 return (vfs_export(mp, &ump->um_export, &args.export));
260 }
261 }
262 /*
263 * Not an update, or updating the name: look up the name
264 * and verify that it refers to a sensible block device.
265 */
266 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
267 if (error = namei(ndp))
268 return (error);
269 devvp = ndp->ni_vp;
270
271 if (devvp->v_type != VBLK) {
272 vrele(devvp);
273 return (ENOTBLK);
274 }
275 if (major(devvp->v_rdev) >= nblkdev) {
276 vrele(devvp);
277 return (ENXIO);
278 }
279 /*
280 * If mount by non-root, then verify that user has necessary
281 * permissions on the device.
282 */
283 if (p->p_ucred->cr_uid != 0) {
284 accessmode = VREAD;
285 if ((mp->mnt_flag & MNT_RDONLY) == 0)
286 accessmode |= VWRITE;
287 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
288 if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
289 vput(devvp);
290 return (error);
291 }
292 VOP_UNLOCK(devvp, 0, p);
293 }
294 if ((mp->mnt_flag & MNT_UPDATE) == 0)
295 error = ffs_mountfs(devvp, mp, p);
296 else {
297 if (devvp != ump->um_devvp)
298 error = EINVAL; /* needs translation */
299 else
300 vrele(devvp);
301 }
302 if (error) {
303 vrele(devvp);
304 return (error);
305 }
306 ump = VFSTOUFS(mp);
307 fs = ump->um_fs;
308 (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1,
309 (size_t *)&size);
310 bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
311 bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
312 MNAMELEN);
313 (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
314 (size_t *)&size);
315 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
316 (void)ffs_statfs(mp, &mp->mnt_stat, p);
317 return (0);
318 }
319
320 /*
321 * Reload all incore data for a filesystem (used after running fsck on
322 * the root filesystem and finding things to fix). The filesystem must
323 * be mounted read-only.
324 *
325 * Things to do to update the mount:
326 * 1) invalidate all cached meta-data.
327 * 2) re-read superblock from disk.
328 * 3) re-read summary information from disk.
329 * 4) invalidate all inactive vnodes.
330 * 5) invalidate all cached file data.
331 * 6) re-read inode data for all active vnodes.
332 */
333 ffs_reload(mountp, cred, p)
334 register struct mount *mountp;
335 struct ucred *cred;
336 struct proc *p;
337 {
338 register struct vnode *vp, *nvp, *devvp;
339 struct inode *ip;
340 void *space;
341 struct buf *bp;
342 struct fs *fs, *newfs;
343 int i, blks, size, error;
344 u_int64_t maxfilesize; /* XXX */
345 int32_t *lp;
346 #if REV_ENDIAN_FS
347 int rev_endian = (mountp->mnt_flag & MNT_REVEND);
348 #endif /* REV_ENDIAN_FS */
349
350 if ((mountp->mnt_flag & MNT_RDONLY) == 0)
351 return (EINVAL);
352 /*
353 * Step 1: invalidate all cached meta-data.
354 */
355 devvp = VFSTOUFS(mountp)->um_devvp;
356 if (vinvalbuf(devvp, 0, cred, p, 0, 0))
357 panic("ffs_reload: dirty1");
358 /*
359 * Step 2: re-read superblock from disk.
360 */
361 VOP_DEVBLOCKSIZE(devvp,&size);
362
363 if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) {
364 brelse(bp);
365 return (error);
366 }
367 newfs = (struct fs *)bp->b_data;
368 #if REV_ENDIAN_FS
369 if (rev_endian) {
370 byte_swap_sbin(newfs);
371 }
372 #endif /* REV_ENDIAN_FS */
373 if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
374 newfs->fs_bsize < sizeof(struct fs)) {
375 #if REV_ENDIAN_FS
376 if (rev_endian)
377 byte_swap_sbout(newfs);
378 #endif /* REV_ENDIAN_FS */
379
380 brelse(bp);
381 return (EIO); /* XXX needs translation */
382 }
383 fs = VFSTOUFS(mountp)->um_fs;
384 /*
385 * Copy pointer fields back into superblock before copying in XXX
386 * new superblock. These should really be in the ufsmount. XXX
387 * Note that important parameters (eg fs_ncg) are unchanged.
388 */
389 newfs->fs_csp = fs->fs_csp;
390 newfs->fs_maxcluster = fs->fs_maxcluster;
391 newfs->fs_contigdirs = fs->fs_contigdirs;
392 bcopy(newfs, fs, (u_int)fs->fs_sbsize);
393 if (fs->fs_sbsize < SBSIZE)
394 bp->b_flags |= B_INVAL;
395 #if REV_ENDIAN_FS
396 if (rev_endian)
397 byte_swap_sbout(newfs);
398 #endif /* REV_ENDIAN_FS */
399 brelse(bp);
400 mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
401 ffs_oldfscompat(fs);
402 maxfilesize = 0x100000000ULL; /* 4GB */
403 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
404 fs->fs_maxfilesize = maxfilesize; /* XXX */
405 /*
406 * Step 3: re-read summary information from disk.
407 */
408 blks = howmany(fs->fs_cssize, fs->fs_fsize);
409 space = fs->fs_csp;
410 for (i = 0; i < blks; i += fs->fs_frag) {
411 size = fs->fs_bsize;
412 if (i + fs->fs_frag > blks)
413 size = (blks - i) * fs->fs_fsize;
414 if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
415 NOCRED, &bp)) {
416 brelse(bp);
417 return (error);
418 }
419 #if REV_ENDIAN_FS
420 if (rev_endian) {
421 /* csum swaps */
422 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
423 }
424 #endif /* REV_ENDIAN_FS */
425 bcopy(bp->b_data, space, (u_int)size);
426 #if REV_ENDIAN_FS
427 if (rev_endian) {
428 /* csum swaps */
429 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
430 }
431 #endif /* REV_ENDIAN_FS */
432 space = (char *) space + size;
433 brelse(bp);
434 }
435 /*
436 * We no longer know anything about clusters per cylinder group.
437 */
438 if (fs->fs_contigsumsize > 0) {
439 lp = fs->fs_maxcluster;
440 for (i = 0; i < fs->fs_ncg; i++)
441 *lp++ = fs->fs_contigsumsize;
442 }
443
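/*
 * Scan every vnode on this mount.  mntvnode_slock is dropped while a
 * vnode is being worked on, so if the vnode migrates to another mount
 * or cannot be recycled or gotten, restart the scan from the top.
 */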
444 loop:
445 simple_lock(&mntvnode_slock);
446 for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
447 if (vp->v_mount != mountp) {
448 simple_unlock(&mntvnode_slock);
449 goto loop;
450 }
451 nvp = vp->v_mntvnodes.le_next;
452 /*
453 * Step 4: invalidate all inactive vnodes.
454 */
455 if (vrecycle(vp, &mntvnode_slock, p))
456 goto loop;
457 /*
458 * Step 5: invalidate all cached file data.
459 */
460 simple_lock(&vp->v_interlock);
461 simple_unlock(&mntvnode_slock);
462 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
463 goto loop;
464 }
465 if (vinvalbuf(vp, 0, cred, p, 0, 0))
466 panic("ffs_reload: dirty2");
467 /*
468 * Step 6: re-read inode data for all active vnodes.
469 */
470 ip = VTOI(vp);
471 if (error =
472 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
473 (int)fs->fs_bsize, NOCRED, &bp)) {
474 brelse(bp);
475 vput(vp);
476 return (error);
477 }
478 #if REV_ENDIAN_FS
479 if (rev_endian) {
480 byte_swap_inode_in(((struct dinode *)bp->b_data +
481 ino_to_fsbo(fs, ip->i_number)), ip);
482 } else {
483 #endif /* REV_ENDIAN_FS */
484 ip->i_din = *((struct dinode *)bp->b_data +
485 ino_to_fsbo(fs, ip->i_number));
486 #if REV_ENDIAN_FS
487 }
488 #endif /* REV_ENDIAN_FS */
489 brelse(bp);
490 vput(vp);
491 simple_lock(&mntvnode_slock);
492 }
493 simple_unlock(&mntvnode_slock);
494 return (0);
495 }
496
497 /*
498 * Common code for mount and mountroot
499 */
500 int
501 ffs_mountfs(devvp, mp, p)
502 register struct vnode *devvp;
503 struct mount *mp;
504 struct proc *p;
505 {
506 register struct ufsmount *ump;
507 struct buf *bp;
508 register struct fs *fs;
509 dev_t dev;
510 struct buf *cgbp;
511 struct cg *cgp;
512 int32_t clustersumoff;
513 void *space;
514 int error, i, blks, size, ronly;
515 int32_t *lp;
516 struct ucred *cred;
517 extern struct vnode *rootvp;
518 u_int64_t maxfilesize; /* XXX */
519 u_int dbsize = DEV_BSIZE;
520 #if REV_ENDIAN_FS
521 int rev_endian=0;
522 #endif /* REV_ENDIAN_FS */
523 dev = devvp->v_rdev;
524 cred = p ? p->p_ucred : NOCRED;
525 /*
526 * Disallow multiple mounts of the same device.
527 * Disallow mounting of a device that is currently in use
528 * (except for root, which might share swap device for miniroot).
529 * Flush out any old buffers remaining from a previous use.
530 */
531 if (error = vfs_mountedon(devvp))
532 return (error);
533 if (vcount(devvp) > 1 && devvp != rootvp)
534 return (EBUSY);
535 if (error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0))
536 return (error);
537
538 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
539 if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
540 return (error);
541
542 VOP_DEVBLOCKSIZE(devvp,&size);
543
544 bp = NULL;
545 ump = NULL;
546 if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, cred, &bp))
547 goto out;
548 fs = (struct fs *)bp->b_data;
549 #if REV_ENDIAN_FS
550 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
551 fs->fs_bsize < sizeof(struct fs)) {
552 int magic = fs->fs_magic;
553
554 byte_swap_ints(&magic, 1);
555 if (magic != FS_MAGIC) {
556 error = EINVAL;
557 goto out;
558 }
559 byte_swap_sbin(fs);
560 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
561 fs->fs_bsize < sizeof(struct fs)) {
562 byte_swap_sbout(fs);
563 error = EINVAL; /* XXX needs translation */
564 goto out;
565 }
566 rev_endian=1;
567 }
568 #endif /* REV_ENDIAN_FS */
569 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
570 fs->fs_bsize < sizeof(struct fs)) {
571 #if REV_ENDIAN_FS
572 if (rev_endian)
573 byte_swap_sbout(fs);
574 #endif /* REV_ENDIAN_FS */
575 error = EINVAL; /* XXX needs translation */
576 goto out;
577 }
578
579
580 /*
581 * The buffer cache does not handle multiple pages in a buf when
582 * invalidating an incore buffer during pageout, and there are no
583 * locks in the pageout path, so there is a danger of losing data
584 * when block allocation happens at the same time as a pageout of a
585 * buddy page: incore() returns a buf covering both pages, which
586 * leads vnode-pageout to incorrectly flush the entire buf.  Until
587 * the low-level ffs code is modified to deal with this, do not
588 * mount any filesystem with a block size larger than 4K (one page).
589 */
590 /*
591 * Can't mount filesystems with a fragment size less than DIRBLKSIZ
592 */
593 /*
594 * Don't mount dirty filesystems, except for the root filesystem
595 */
596 if ((fs->fs_bsize > PAGE_SIZE) || (fs->fs_fsize < DIRBLKSIZ) ||
597 ((!(mp->mnt_flag & MNT_ROOTFS)) && (!fs->fs_clean))) {
598 #if REV_ENDIAN_FS
599 if (rev_endian)
600 byte_swap_sbout(fs);
601 #endif /* REV_ENDIAN_FS */
602 error = ENOTSUP;
603 goto out;
604 }
605
606 /* Figure out the device block size the file system expects: */
607 /* device block size = fragment size / number of sectors per fragment */
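/*
 * For example (illustrative numbers only): with a 1024-byte fragment
 * and two 512-byte sectors per fragment, NSPF(fs) == 2 and dbsize
 * works out to 512 bytes.
 */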
608
609 dbsize = fs->fs_fsize / NSPF(fs);
610 if (dbsize <= 0) {
611 kprintf("device blocksize computation failed\n");
612 } else {
613 if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&dbsize,
614 FWRITE, NOCRED, p) != 0) {
615 kprintf("failed to set device blocksize\n");
616 }
617 /* force the specfs to reread blocksize from size() */
618 set_fsblocksize(devvp);
619 }
620
621 /* cache the IO attributes */
622 error = vfs_init_io_attributes(devvp, mp);
623 if (error) {
624 printf("ffs_mountfs: vfs_init_io_attributes returned %d\n",
625 error);
626 goto out;
627 }
628
629 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
630 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
631 #if REV_ENDIAN_FS
632 if (rev_endian)
633 byte_swap_sbout(fs);
634 #endif /* REV_ENDIAN_FS */
635 error = EROFS; /* needs translation */
636 goto out;
637 }
638
639 /* If we are not mounting read only, then check for overlap
640 * condition in cylinder group's free block map.
641 * If overlap exists, then force this into a read only mount
642 * to avoid further corruption. PR#2216969
643 */
644 if (ronly == 0){
645 if (error = bread (devvp, fsbtodb(fs, cgtod(fs, 0)),
646 (int)fs->fs_cgsize, NOCRED, &cgbp)) {
647 brelse(cgbp);
648 goto out;
649 }
650 cgp = (struct cg *)cgbp->b_data;
651 #if REV_ENDIAN_FS
652 if (rev_endian)
653 byte_swap_cgin(cgp,fs);
654 #endif /* REV_ENDIAN_FS */
655 if (!cg_chkmagic(cgp)){
656 #if REV_ENDIAN_FS
657 if (rev_endian)
658 byte_swap_cgout(cgp,fs);
659 #endif /* REV_ENDIAN_FS */
660 brelse(cgbp);
661 goto out;
662 }
663 if (cgp->cg_clustersumoff != 0) {
664 /* Check for overlap */
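/*
 * cg_freeoff is the start of the free-fragment bitmap, which takes
 * howmany(fs_cpg * fs_spc / NSPF(fs), NBBY) bytes (one bit per
 * fragment in the group) rounded up to a long boundary; if the
 * stored cg_clustersumoff falls inside that region, the cluster
 * summary overlaps the free map.
 */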
665 clustersumoff = cgp->cg_freeoff +
666 howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY);
667 clustersumoff = roundup(clustersumoff, sizeof(long));
668 if (cgp->cg_clustersumoff < clustersumoff) {
669 /* Overlap exists */
670 mp->mnt_flag |= MNT_RDONLY;
671 ronly = 1;
672 }
673 }
674 #if REV_ENDIAN_FS
675 if (rev_endian)
676 byte_swap_cgout(cgp,fs);
677 #endif /* REV_ENDIAN_FS */
678 brelse(cgbp);
679 }
680
681 ump = _MALLOC(sizeof *ump, M_UFSMNT, M_WAITOK);
682 bzero((caddr_t)ump, sizeof *ump);
683 ump->um_fs = _MALLOC((u_long)fs->fs_sbsize, M_UFSMNT,
684 M_WAITOK);
685 bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
686 if (fs->fs_sbsize < SBSIZE)
687 bp->b_flags |= B_INVAL;
688 #if REV_ENDIAN_FS
689 if (rev_endian)
690 byte_swap_sbout(fs);
691 #endif /* REV_ENDIAN_FS */
692 brelse(bp);
693 bp = NULL;
694 fs = ump->um_fs;
695 fs->fs_ronly = ronly;
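/*
 * The cylinder-group summary area, the per-cg cluster maxima
 * (fs_maxcluster) and the per-cg directory counts (fs_contigdirs)
 * are all carved out of the single allocation made below.
 */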
696 size = fs->fs_cssize;
697 blks = howmany(size, fs->fs_fsize);
698 if (fs->fs_contigsumsize > 0)
699 size += fs->fs_ncg * sizeof(int32_t);
700 size += fs->fs_ncg * sizeof(u_int8_t);
701 space = _MALLOC((u_long)size, M_UFSMNT, M_WAITOK);
702 fs->fs_csp = space;
703 for (i = 0; i < blks; i += fs->fs_frag) {
704 size = fs->fs_bsize;
705 if (i + fs->fs_frag > blks)
706 size = (blks - i) * fs->fs_fsize;
707 if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
708 cred, &bp)) {
709 _FREE(fs->fs_csp, M_UFSMNT);
710 goto out;
711 }
712 bcopy(bp->b_data, space, (u_int)size);
713 #if REV_ENDIAN_FS
714 if (rev_endian)
715 byte_swap_ints((int *) space, size / sizeof(int));
716 #endif /* REV_ENDIAN_FS */
717 space = (char *)space + size;
718 brelse(bp);
719 bp = NULL;
720 }
721 if (fs->fs_contigsumsize > 0) {
722 fs->fs_maxcluster = lp = space;
723 for (i = 0; i < fs->fs_ncg; i++)
724 *lp++ = fs->fs_contigsumsize;
725 space = lp;
726 }
727 size = fs->fs_ncg * sizeof(u_int8_t);
728 fs->fs_contigdirs = (u_int8_t *)space;
729 space = (u_int8_t *)space + size;
730 bzero(fs->fs_contigdirs, size);
731 /* XXX Compatibility for old filesystems */
732 if (fs->fs_avgfilesize <= 0)
733 fs->fs_avgfilesize = AVFILESIZ;
734 if (fs->fs_avgfpdir <= 0)
735 fs->fs_avgfpdir = AFPDIR;
736 /* XXX End of compatibility */
737 mp->mnt_data = (qaddr_t)ump;
738 mp->mnt_stat.f_fsid.val[0] = (long)dev;
739 mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
740 /* XXX warning hardcoded max symlen and not "mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;" */
741 mp->mnt_maxsymlinklen = 60;
742 #if REV_ENDIAN_FS
743 if (rev_endian)
744 mp->mnt_flag |= MNT_REVEND;
745 #endif /* REV_ENDIAN_FS */
746 ump->um_mountp = mp;
747 ump->um_dev = dev;
748 ump->um_devvp = devvp;
749 ump->um_nindir = fs->fs_nindir;
750 ump->um_bptrtodb = fs->fs_fsbtodb;
751 ump->um_seqinc = fs->fs_frag;
752 for (i = 0; i < MAXQUOTAS; i++)
753 ump->um_qfiles[i].qf_vp = NULLVP;
754 devvp->v_specflags |= SI_MOUNTEDON;
755 ffs_oldfscompat(fs);
756 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
757 maxfilesize = 0x100000000ULL; /* 4GB */
758 #if 0
759 maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */
760 #endif /* 0 */
761 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
762 fs->fs_maxfilesize = maxfilesize; /* XXX */
763 if (ronly == 0) {
764 fs->fs_clean = 0;
765 (void) ffs_sbupdate(ump, MNT_WAIT);
766 }
767 return (0);
768 out:
769 if (bp)
770 brelse(bp);
771 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
772 if (ump) {
773 _FREE(ump->um_fs, M_UFSMNT);
774 _FREE(ump, M_UFSMNT);
775 mp->mnt_data = (qaddr_t)0;
776 }
777 return (error);
778 }
779
780 /*
781 * Sanity checks for old file systems.
782 *
783 * XXX - goes away some day.
784 */
785 ffs_oldfscompat(fs)
786 struct fs *fs;
787 {
788 int i;
789
790 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */
791 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */
792 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
793 fs->fs_nrpos = 8; /* XXX */
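/*
 * For pre-4.4 inode formats, derive fs_maxfilesize from the block
 * layout: NDADDR direct blocks plus NINDIR(fs)^i blocks for each of
 * the NIADDR indirect levels, all of size fs_bsize.
 */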
794 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
795 u_int64_t sizepb = fs->fs_bsize; /* XXX */
796 /* XXX */
797 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
798 for (i = 0; i < NIADDR; i++) { /* XXX */
799 sizepb *= NINDIR(fs); /* XXX */
800 fs->fs_maxfilesize += sizepb; /* XXX */
801 } /* XXX */
802 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
803 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
804 } /* XXX */
805 return (0);
806 }
807
808 /*
809 * unmount system call
810 */
811 int
812 ffs_unmount(mp, mntflags, p)
813 struct mount *mp;
814 int mntflags;
815 struct proc *p;
816 {
817 register struct ufsmount *ump;
818 register struct fs *fs;
819 int error, flags;
820 int force;
821
822 flags = 0;
823 force = 0;
824 if (mntflags & MNT_FORCE) {
825 flags |= FORCECLOSE;
826 force = 1;
827 }
828 if ( (error = ffs_flushfiles(mp, flags, p)) && !force )
829 return (error);
830 ump = VFSTOUFS(mp);
831 fs = ump->um_fs;
832 if (fs->fs_ronly == 0) {
833 fs->fs_clean = 1;
834 if (error = ffs_sbupdate(ump, MNT_WAIT)) {
835 fs->fs_clean = 0;
836 #ifdef notyet
837 /* We can at least clean up here, since the media could be */
838 /* write-protected and mount does not check for write failures. */
839 /* FIXME LATER: the correct fix would be to have mount detect */
840 /* write-protected media and downgrade to a read-only mount. */
841 /* For now, fall through without returning the error. */
842 return (error);
843 #endif /* notyet */
844 }
845 }
846 ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
847 error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
848 NOCRED, p);
849 if (error && !force)
850 return (error);
851 vrele(ump->um_devvp);
852
853 _FREE(fs->fs_csp, M_UFSMNT);
854 _FREE(fs, M_UFSMNT);
855 _FREE(ump, M_UFSMNT);
856 mp->mnt_data = (qaddr_t)0;
857 #if REV_ENDIAN_FS
858 mp->mnt_flag &= ~MNT_REVEND;
859 #endif /* REV_ENDIAN_FS */
860 return (0);
861 }
862
863 /*
864 * Flush out all the files in a filesystem.
865 */
866 ffs_flushfiles(mp, flags, p)
867 register struct mount *mp;
868 int flags;
869 struct proc *p;
870 {
871 register struct ufsmount *ump;
872 int i, error;
873
874 ump = VFSTOUFS(mp);
875
876 #if QUOTA
877 /*
878 * NOTE: The open quota files have an indirect reference
879 * on the root directory vnode. We must account for this
880 * extra reference when doing the intial vflush.
881 */
882 if (mp->mnt_flag & MNT_QUOTA) {
883 struct vnode *rootvp = NULLVP;
884 int quotafilecnt = 0;
885
886 /* Find out how many quota files we have open. */
887 for (i = 0; i < MAXQUOTAS; i++) {
888 if (ump->um_qfiles[i].qf_vp != NULLVP)
889 ++quotafilecnt;
890 }
891
892 /*
893 * Check if the root vnode is in our inode hash
894 * (so we can skip over it).
895 */
896 rootvp = ufs_ihashget(ump->um_dev, ROOTINO);
897
898 error = vflush(mp, rootvp, SKIPSYSTEM|flags);
899
900 if (rootvp) {
901 /*
902 * See if there are additional references on the
903 * root vp besides the ones obtained from the open
904 * quota files and the ufs_ihashget call above.
905 */
906 if ((error == 0) &&
907 (rootvp->v_usecount > (1 + quotafilecnt))) {
908 error = EBUSY; /* root dir is still open */
909 }
910 vput(rootvp);
911 }
912 if (error && (flags & FORCECLOSE) == 0)
913 return (error);
914
915 for (i = 0; i < MAXQUOTAS; i++) {
916 if (ump->um_qfiles[i].qf_vp == NULLVP)
917 continue;
918 quotaoff(p, mp, i);
919 }
920 /*
921 * Here we fall through to vflush again to ensure
922 * that we have gotten rid of all the system vnodes.
923 */
924 }
925 #endif
926 error = vflush(mp, NULLVP, SKIPSWAP|flags);
927 error = vflush(mp, NULLVP, flags);
928 return (error);
929 }
930
931 /*
932 * Get file system statistics.
933 */
934 int
935 ffs_statfs(mp, sbp, p)
936 struct mount *mp;
937 register struct statfs *sbp;
938 struct proc *p;
939 {
940 register struct ufsmount *ump;
941 register struct fs *fs;
942
943 ump = VFSTOUFS(mp);
944 fs = ump->um_fs;
945 if (fs->fs_magic != FS_MAGIC)
946 panic("ffs_statfs");
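/*
 * Counts below are in fragment-sized units (f_bsize == fs_fsize):
 * whole free blocks are converted to fragments for f_bfree, and
 * f_bavail excludes the fs_minfree reserve via freespace().
 */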
947 sbp->f_bsize = fs->fs_fsize;
948 sbp->f_iosize = fs->fs_bsize;
949 sbp->f_blocks = fs->fs_dsize;
950 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
951 fs->fs_cstotal.cs_nffree;
952 sbp->f_bavail = freespace(fs, fs->fs_minfree);
953 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
954 sbp->f_ffree = fs->fs_cstotal.cs_nifree;
955 if (sbp != &mp->mnt_stat) {
956 sbp->f_type = mp->mnt_vfc->vfc_typenum;
957 bcopy((caddr_t)mp->mnt_stat.f_mntonname,
958 (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
959 bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
960 (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
961 }
962 return (0);
963 }
964
965 /*
966 * Go through the disk queues to initiate sandbagged IO;
967 * go through the inodes to write those that have been modified;
968 * initiate the writing of the super block if it has been modified.
969 *
970 * Note: we are always called with the filesystem marked `MPBUSY'.
971 */
972 int
973 ffs_sync(mp, waitfor, cred, p)
974 struct mount *mp;
975 int waitfor;
976 struct ucred *cred;
977 struct proc *p;
978 {
979 struct vnode *nvp, *vp;
980 struct inode *ip;
981 struct ufsmount *ump = VFSTOUFS(mp);
982 struct fs *fs;
983 int error, allerror = 0;
984
985 fs = ump->um_fs;
986 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
987 printf("fs = %s\n", fs->fs_fsmnt);
988 panic("update: rofs mod");
989 }
990 /*
991 * Write back each (modified) inode.
992 */
993 simple_lock(&mntvnode_slock);
994 loop:
995 for (vp = mp->mnt_vnodelist.lh_first;
996 vp != NULL;
997 vp = nvp) {
998 int didhold = 0;
999
1000 /*
1001 * If the vnode that we are about to sync is no longer
1002 * associated with this mount point, start over.
1003 */
1004 if (vp->v_mount != mp)
1005 goto loop;
1006 simple_lock(&vp->v_interlock);
1007 nvp = vp->v_mntvnodes.le_next;
1008 ip = VTOI(vp);
1009
1010 // skip this vnode if it has no inode or if it is locked
1011 // or being reclaimed.
1012 if (ip == NULL || vp->v_flag & (VXLOCK|VORECLAIM)) {
1013 simple_unlock(&vp->v_interlock);
1014 continue;
1015 }
1016
1017 if ((vp->v_type == VNON) ||
1018 ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1019 vp->v_dirtyblkhd.lh_first == NULL && !(vp->v_flag & VHASDIRTY))) {
1020 simple_unlock(&vp->v_interlock);
1021 continue;
1022 }
1023 simple_unlock(&mntvnode_slock);
1024 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
1025 if (error) {
1026 simple_lock(&mntvnode_slock);
1027 if (error == ENOENT)
1028 goto loop;
1029 continue;
1030 }
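/*
 * Take a hold on the vnode's ubc info for the duration of the fsync;
 * it is dropped again with ubc_rele() below.
 */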
1031 didhold = ubc_hold(vp);
1032 if (error = VOP_FSYNC(vp, cred, waitfor, p))
1033 allerror = error;
1034 VOP_UNLOCK(vp, 0, p);
1035 if (didhold)
1036 ubc_rele(vp);
1037 vrele(vp);
1038 simple_lock(&mntvnode_slock);
1039 }
1040 simple_unlock(&mntvnode_slock);
1041 /*
1042 * Force stale file system control information to be flushed.
1043 */
1044 if (error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p))
1045 allerror = error;
1046 #if QUOTA
1047 qsync(mp);
1048 #endif
1049 /*
1050 * Write back modified superblock.
1051 */
1052 if (fs->fs_fmod != 0) {
1053 fs->fs_fmod = 0;
1054 fs->fs_time = time.tv_sec;
1055 if (error = ffs_sbupdate(ump, waitfor))
1056 allerror = error;
1057 }
1058 return (allerror);
1059 }
1060
1061 /*
1062 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1063 * in from disk. If it is in core, wait for the lock bit to clear, then
1064 * return the inode locked. Detection and handling of mount points must be
1065 * done by the calling routine.
1066 */
1067 int
1068 ffs_vget(mp, inop, vpp)
1069 struct mount *mp;
1070 void *inop;
1071 struct vnode **vpp;
1072 {
1073 struct proc *p = current_proc(); /* XXX */
1074 struct fs *fs;
1075 struct inode *ip;
1076 struct ufsmount *ump;
1077 struct buf *bp;
1078 struct vnode *vp;
1079 ino_t ino;
1080 dev_t dev;
1081 int i, type, error = 0;
1082
1083 ino = (ino_t) inop;
1084 ump = VFSTOUFS(mp);
1085 dev = ump->um_dev;
1086
1087 /* Check for unmount in progress */
1088 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
1089 *vpp = NULL;
1090 return (EPERM);
1091 }
1092
1093 /* check in the inode hash */
1094 if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1095 vp = *vpp;
1096 UBCINFOCHECK("ffs_vget", vp);
1097 return (0);
1098 }
1099
1100 /*
1101 * Not in inode hash.
1102 * Allocate a new vnode/inode.
1103 */
1104 type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
1105 MALLOC_ZONE(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
1106 bzero((caddr_t)ip, sizeof(struct inode));
1107 lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
1108 /* lock the inode */
1109 lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p);
1110
1111 ip->i_fs = fs = ump->um_fs;
1112 ip->i_dev = dev;
1113 ip->i_number = ino;
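/*
 * IN_ALLOC marks the inode as still being initialized; a concurrent
 * hash lookup will wait on it, and IN_WALLOC indicates a waiter that
 * needs the wakeup() issued on the way out.
 */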
1114 SET(ip->i_flag, IN_ALLOC);
1115 #if QUOTA
1116 for (i = 0; i < MAXQUOTAS; i++)
1117 ip->i_dquot[i] = NODQUOT;
1118 #endif
1119
1120 /*
1121 * We could have blocked in MALLOC_ZONE. Check for the race.
1122 */
1123 if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1124 /* lost the race, clean up */
1125 FREE_ZONE(ip, sizeof(struct inode), type);
1126 vp = *vpp;
1127 UBCINFOCHECK("ffs_vget", vp);
1128 return (0);
1129 }
1130
1131 /*
1132 * Put it onto its hash chain locked so that other requests for
1133 * this inode will block if they arrive while we are sleeping waiting
1134 * for old data structures to be purged or for the contents of the
1135 * disk portion of this inode to be read.
1136 */
1137 ufs_ihashins(ip);
1138
1139 /* Read in the disk contents for the inode, copy into the inode. */
1140 if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1141 (int)fs->fs_bsize, NOCRED, &bp)) {
1142 brelse(bp);
1143 goto errout;
1144 }
1145 #if REV_ENDIAN_FS
1146 if (mp->mnt_flag & MNT_REVEND) {
1147 byte_swap_inode_in(((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)),ip);
1148 } else {
1149 ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
1150 }
1151 #else
1152 ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
1153 #endif /* REV_ENDIAN_FS */
1154 brelse(bp);
1155
1156 if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp))
1157 goto errout;
1158
1159 vp->v_data = ip;
1160 ip->i_vnode = vp;
1161
1162 /*
1163 * Initialize the vnode from the inode, check for aliases.
1164 * Note that the underlying vnode may have changed.
1165 */
1166 if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
1167 vput(vp);
1168 *vpp = NULL;
1169 goto out;
1170 }
1171 /*
1172 * Finish inode initialization now that aliasing has been resolved.
1173 */
1174 ip->i_devvp = ump->um_devvp;
1175 VREF(ip->i_devvp);
1176 /*
1177 * Set up a generation number for this inode if it does not
1178 * already have one. This should only happen on old filesystems.
1179 */
1180 if (ip->i_gen == 0) {
1181 if (++nextgennumber < (u_long)time.tv_sec)
1182 nextgennumber = time.tv_sec;
1183 ip->i_gen = nextgennumber;
1184 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1185 ip->i_flag |= IN_MODIFIED;
1186 }
1187 /*
1188 * Ensure that uid and gid are correct. This is a temporary
1189 * fix until fsck has been changed to do the update.
1190 */
1191 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1192 ip->i_uid = ip->i_din.di_ouid; /* XXX */
1193 ip->i_gid = ip->i_din.di_ogid; /* XXX */
1194 } /* XXX */
1195
1196 if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp))
1197 ubc_info_init(vp);
1198 *vpp = vp;
1199
1200 out:
1201 CLR(ip->i_flag, IN_ALLOC);
1202 if (ISSET(ip->i_flag, IN_WALLOC))
1203 wakeup(ip);
1204 return (error);
1205
1206 errout:
1207 ufs_ihashrem(ip);
1208 CLR(ip->i_flag, IN_ALLOC);
1209 if (ISSET(ip->i_flag, IN_WALLOC))
1210 wakeup(ip);
1211 FREE_ZONE(ip, sizeof(struct inode), type);
1212 *vpp = NULL;
1213 return (error);
1214 }
1215
1216 /*
1217 * File handle to vnode
1218 *
1219 * Have to be really careful about stale file handles:
1220 * - check that the inode number is valid
1221 * - call ffs_vget() to get the locked inode
1222 * - check for an unallocated inode (i_mode == 0)
1223 * - check that the given client host has export rights and return
1224 * those rights via exflagsp and credanonp
1225 */
1226 int
1227 ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
1228 register struct mount *mp;
1229 struct fid *fhp;
1230 struct mbuf *nam;
1231 struct vnode **vpp;
1232 int *exflagsp;
1233 struct ucred **credanonp;
1234 {
1235 register struct ufid *ufhp;
1236 struct fs *fs;
1237
1238 ufhp = (struct ufid *)fhp;
1239 fs = VFSTOUFS(mp)->um_fs;
1240 if (ufhp->ufid_ino < ROOTINO ||
1241 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1242 return (ESTALE);
1243 return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
1244 }
1245
1246 /*
1247 * Vnode pointer to File handle
1248 */
1249 /* ARGSUSED */
1250 ffs_vptofh(vp, fhp)
1251 struct vnode *vp;
1252 struct fid *fhp;
1253 {
1254 register struct inode *ip;
1255 register struct ufid *ufhp;
1256
1257 ip = VTOI(vp);
1258 ufhp = (struct ufid *)fhp;
1259 ufhp->ufid_len = sizeof(struct ufid);
1260 ufhp->ufid_ino = ip->i_number;
1261 ufhp->ufid_gen = ip->i_gen;
1262 return (0);
1263 }
1264
1265 /*
1266 * Initialize the filesystem; just use ufs_init.
1267 */
1268 int
1269 ffs_init(vfsp)
1270 struct vfsconf *vfsp;
1271 {
1272
1273 return (ufs_init(vfsp));
1274 }
1275
1276 /*
1277 * fast filesystem related variables.
1278 */
1279 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1280 int *name;
1281 u_int namelen;
1282 void *oldp;
1283 size_t *oldlenp;
1284 void *newp;
1285 size_t newlen;
1286 struct proc *p;
1287 {
1288 extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
1289
1290 /* all sysctl names at this level are terminal */
1291 if (namelen != 1)
1292 return (ENOTDIR); /* overloaded */
1293
1294 switch (name[0]) {
1295 case FFS_CLUSTERREAD:
1296 return (sysctl_int(oldp, oldlenp, newp, newlen,
1297 &doclusterread));
1298 case FFS_CLUSTERWRITE:
1299 return (sysctl_int(oldp, oldlenp, newp, newlen,
1300 &doclusterwrite));
1301 case FFS_REALLOCBLKS:
1302 return (sysctl_int(oldp, oldlenp, newp, newlen,
1303 &doreallocblks));
1304 case FFS_ASYNCFREE:
1305 return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
1306 default:
1307 return (EOPNOTSUPP);
1308 }
1309 /* NOTREACHED */
1310 }
1311
1312 /*
1313 * Write a superblock and associated information back to disk.
1314 */
1315 int
1316 ffs_sbupdate(mp, waitfor)
1317 struct ufsmount *mp;
1318 int waitfor;
1319 {
1320 register struct fs *dfs, *fs = mp->um_fs;
1321 register struct buf *bp;
1322 int blks;
1323 void *space;
1324 int i, size, error, allerror = 0;
1325 int devBlockSize=0;
1326 #if REV_ENDIAN_FS
1327 int rev_endian=(mp->um_mountp->mnt_flag & MNT_REVEND);
1328 #endif /* REV_ENDIAN_FS */
1329
1330 /*
1331 * First write back the summary information.
1332 */
1333 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1334 space = fs->fs_csp;
1335 for (i = 0; i < blks; i += fs->fs_frag) {
1336 size = fs->fs_bsize;
1337 if (i + fs->fs_frag > blks)
1338 size = (blks - i) * fs->fs_fsize;
1339 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1340 size, 0, 0, BLK_META);
1341 bcopy(space, bp->b_data, (u_int)size);
1342 #if REV_ENDIAN_FS
1343 if (rev_endian) {
1344 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
1345 }
1346 #endif /* REV_ENDIAN_FS */
1347 space = (char *)space + size;
1348 if (waitfor != MNT_WAIT)
1349 bawrite(bp);
1350 else if (error = bwrite(bp))
1351 allerror = error;
1352 }
1353 /*
1354 * Now write back the superblock itself. If any errors occurred
1355 * up to this point, then fail so that the superblock avoids
1356 * being written out as clean.
1357 */
1358 if (allerror)
1359 return (allerror);
1360 VOP_DEVBLOCKSIZE(mp->um_devvp,&devBlockSize);
1361 bp = getblk(mp->um_devvp, (SBOFF/devBlockSize), (int)fs->fs_sbsize, 0, 0, BLK_META);
1362 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1363 /* Restore compatibility to old file systems. XXX */
1364 dfs = (struct fs *)bp->b_data; /* XXX */
1365 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
1366 dfs->fs_nrpos = -1; /* XXX */
1367 #if REV_ENDIAN_FS
1368 /*
1369 * Swap bytes here so that, in the case of an inode format
1370 * older than FS_44INODEFMT, the appropriate fields get
1371 * moved below.
1372 */
1373 if (rev_endian) {
1374 byte_swap_sbout((struct fs *)bp->b_data);
1375 }
1376 #endif /* REV_ENDIAN_FS */
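/*
 * Compatibility shuffle for pre-4.4 inode formats: rotate the five
 * 32-bit words starting at fs_qbmask right by one, presumably so the
 * on-disk copy matches the old superblock layout (XXX).
 */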
1377 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1378 int32_t *lp, tmp; /* XXX */
1379 /* XXX */
1380 lp = (int32_t *)&dfs->fs_qbmask; /* XXX */
1381 tmp = lp[4]; /* XXX */
1382 for (i = 4; i > 0; i--) /* XXX */
1383 lp[i] = lp[i-1]; /* XXX */
1384 lp[0] = tmp; /* XXX */
1385 } /* XXX */
1386 #if REV_ENDIAN_FS
1387 /* Note that dfs has already been byte-swapped, so swap the saved
1388 * maxfilesize before writing it out.
1389 */
1390 if (rev_endian) {
1391 dfs->fs_maxfilesize = NXSwapLongLong(mp->um_savedmaxfilesize); /* XXX */
1392 } else {
1393 #endif /* REV_ENDIAN_FS */
1394 dfs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
1395 #if REV_ENDIAN_FS
1396 }
1397 #endif /* REV_ENDIAN_FS */
1398 if (waitfor != MNT_WAIT)
1399 bawrite(bp);
1400 else if (error = bwrite(bp))
1401 allerror = error;
1402
1403 return (allerror);
1404 }