1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1989, 1991, 1993, 1994
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
56 */
57
58 #include <rev_endian_fs.h>
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/namei.h>
62 #include <sys/proc.h>
63 #include <sys/kernel.h>
64 #include <sys/vnode.h>
65 #include <sys/socket.h>
66 #include <sys/mount.h>
67 #include <sys/buf.h>
68 #include <sys/mbuf.h>
69 #include <sys/file.h>
70 #include <dev/disk.h>
71 #include <sys/ioctl.h>
72 #include <sys/errno.h>
73 #include <sys/malloc.h>
74 #include <sys/ubc.h>
75 #include <sys/quota.h>
76
77 #include <miscfs/specfs/specdev.h>
78
79 #include <ufs/ufs/quota.h>
80 #include <ufs/ufs/ufsmount.h>
81 #include <ufs/ufs/inode.h>
82 #include <ufs/ufs/ufs_extern.h>
83
84 #include <ufs/ffs/fs.h>
85 #include <ufs/ffs/ffs_extern.h>
86 #if REV_ENDIAN_FS
87 #include <ufs/ufs/ufs_byte_order.h>
88 #include <architecture/byte_order.h>
89 #endif /* REV_ENDIAN_FS */
90
91 int ffs_sbupdate __P((struct ufsmount *, int));
92
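/*
 * UFS/FFS operations vector handed to the VFS layer.  Each field below is
 * the FFS (or shared UFS) implementation of the corresponding vfsops entry
 * point: mount, start, unmount, root, quotactl, statfs, sync, vget,
 * fhtovp, vptofh, init and sysctl.
 */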
93 struct vfsops ufs_vfsops = {
94 ffs_mount,
95 ufs_start,
96 ffs_unmount,
97 ufs_root,
98 ufs_quotactl,
99 ffs_statfs,
100 ffs_sync,
101 ffs_vget,
102 ffs_fhtovp,
103 ffs_vptofh,
104 ffs_init,
105 ffs_sysctl,
106 };
107
108 extern u_long nextgennumber;
109
110 /*
111 * Called by main() when ufs is going to be mounted as root.
112 */
113 ffs_mountroot()
114 {
115 extern struct vnode *rootvp;
116 struct fs *fs;
117 struct mount *mp;
118 struct proc *p = current_proc(); /* XXX */
119 struct ufsmount *ump;
120 u_int size;
121 int error;
122
123 /*
124 * Get vnode for rootdev.
125 */
126 if (error = bdevvp(rootdev, &rootvp)) {
127 printf("ffs_mountroot: can't setup bdevvp");
128 return (error);
129 }
130 if (error = vfs_rootmountalloc("ufs", "root_device", &mp)) {
131 vrele(rootvp); /* release the reference from bdevvp() */
132 return (error);
133 }
134
135 /* Must set the MNT_ROOTFS flag before doing the actual mount */
136 mp->mnt_flag |= MNT_ROOTFS;
137
138 if (error = ffs_mountfs(rootvp, mp, p)) {
139 mp->mnt_vfc->vfc_refcount--;
140 vfs_unbusy(mp, p);
141 vrele(rootvp); /* release the reference from bdevvp() */
142 _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT);
143 return (error);
144 }
145 simple_lock(&mountlist_slock);
146 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
147 simple_unlock(&mountlist_slock);
148 ump = VFSTOUFS(mp);
149 fs = ump->um_fs;
150 (void) copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
151 (void)ffs_statfs(mp, &mp->mnt_stat, p);
152 vfs_unbusy(mp, p);
153 inittodr(fs->fs_time);
154 return (0);
155 }
156
157 /*
158 * VFS Operations.
159 *
160 * mount system call
161 */
162 int
163 ffs_mount(mp, path, data, ndp, p)
164 register struct mount *mp;
165 char *path;
166 caddr_t data;
167 struct nameidata *ndp;
168 struct proc *p;
169 {
170 struct vnode *devvp;
171 struct ufs_args args;
172 struct ufsmount *ump;
173 register struct fs *fs;
174 u_int size;
175 int error, flags;
176 mode_t accessmode;
177 int ronly;
178 int reload = 0;
179
180 if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
181 return (error);
182 /*
183 * If updating, check whether changing from read-only to
184 * read/write; if there is no device name, that's all we do.
185 */
186 if (mp->mnt_flag & MNT_UPDATE) {
187 ump = VFSTOUFS(mp);
188 fs = ump->um_fs;
189 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
190 flags = WRITECLOSE;
191 if (mp->mnt_flag & MNT_FORCE)
192 flags |= FORCECLOSE;
193 if (error = ffs_flushfiles(mp, flags, p))
194 return (error);
195 fs->fs_clean = 1;
196 fs->fs_ronly = 1;
197 if (error = ffs_sbupdate(ump, MNT_WAIT)) {
198 fs->fs_clean = 0;
199 fs->fs_ronly = 0;
200 return (error);
201 }
202 }
203                 /* save fs_ronly for later use */
204 ronly = fs->fs_ronly;
205 if ((mp->mnt_flag & MNT_RELOAD) || ronly)
206 reload = 1;
207 if ((reload) &&
208 (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)))
209 return (error);
210                 /* restore the saved ronly value after the reload */
211 fs->fs_ronly = ronly;
212                 /*
213                  * Do not allow a read/write update if the system was booted
214                  * single-user and the user tries to mount -uw without running fsck
215                  */
216 if (!fs->fs_clean && ronly) {
217 printf("WARNING: trying to mount a dirty file system\n");
218 if (issingleuser() && (mp->mnt_flag & MNT_ROOTFS)) {
219 printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",fs->fs_fsmnt);
220 /*
221 * Reset the readonly bit as reload might have
222 * modified this bit
223 */
224 fs->fs_ronly = 1;
225 return(EPERM);
226 }
227 }
228
229 if (ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
230 /*
231 * If upgrade to read-write by non-root, then verify
232 * that user has necessary permissions on the device.
233 */
234 if (p->p_ucred->cr_uid != 0) {
235 devvp = ump->um_devvp;
236 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
237 if (error = VOP_ACCESS(devvp, VREAD | VWRITE,
238 p->p_ucred, p)) {
239 VOP_UNLOCK(devvp, 0, p);
240 return (error);
241 }
242 VOP_UNLOCK(devvp, 0, p);
243 }
244 fs->fs_ronly = 0;
245 fs->fs_clean = 0;
246 (void) ffs_sbupdate(ump, MNT_WAIT);
247 }
248 if (args.fspec == 0) {
249 /*
250 * Process export requests.
251 */
252 return (vfs_export(mp, &ump->um_export, &args.export));
253 }
254 }
255 /*
256 * Not an update, or updating the name: look up the name
257 * and verify that it refers to a sensible block device.
258 */
259 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
260 if (error = namei(ndp))
261 return (error);
262 devvp = ndp->ni_vp;
263
264 if (devvp->v_type != VBLK) {
265 vrele(devvp);
266 return (ENOTBLK);
267 }
268 if (major(devvp->v_rdev) >= nblkdev) {
269 vrele(devvp);
270 return (ENXIO);
271 }
272 /*
273 * If mount by non-root, then verify that user has necessary
274 * permissions on the device.
275 */
276 if (p->p_ucred->cr_uid != 0) {
277 accessmode = VREAD;
278 if ((mp->mnt_flag & MNT_RDONLY) == 0)
279 accessmode |= VWRITE;
280 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
281 if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
282 vput(devvp);
283 return (error);
284 }
285 VOP_UNLOCK(devvp, 0, p);
286 }
287 if ((mp->mnt_flag & MNT_UPDATE) == 0)
288 error = ffs_mountfs(devvp, mp, p);
289 else {
290 if (devvp != ump->um_devvp)
291 error = EINVAL; /* needs translation */
292 else
293 vrele(devvp);
294 }
295 if (error) {
296 vrele(devvp);
297 return (error);
298 }
299 ump = VFSTOUFS(mp);
300 fs = ump->um_fs;
301 (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
302 bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
303 bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
304 MNAMELEN);
305 (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
306 &size);
307 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
308 (void)ffs_statfs(mp, &mp->mnt_stat, p);
309 return (0);
310 }
311
312 /*
313 * Reload all incore data for a filesystem (used after running fsck on
314 * the root filesystem and finding things to fix). The filesystem must
315 * be mounted read-only.
316 *
317 * Things to do to update the mount:
318 * 1) invalidate all cached meta-data.
319 * 2) re-read superblock from disk.
320 * 3) re-read summary information from disk.
321 * 4) invalidate all inactive vnodes.
322 * 5) invalidate all cached file data.
323 * 6) re-read inode data for all active vnodes.
324 */
325 ffs_reload(mountp, cred, p)
326 register struct mount *mountp;
327 struct ucred *cred;
328 struct proc *p;
329 {
330 register struct vnode *vp, *nvp, *devvp;
331 struct inode *ip;
332 void *space;
333 struct buf *bp;
334 struct fs *fs, *newfs;
335 int i, blks, size, error;
336 u_int64_t maxfilesize; /* XXX */
337 int32_t *lp;
338 #if REV_ENDIAN_FS
339 int rev_endian = (mountp->mnt_flag & MNT_REVEND);
340 #endif /* REV_ENDIAN_FS */
341
342 if ((mountp->mnt_flag & MNT_RDONLY) == 0)
343 return (EINVAL);
344 /*
345 * Step 1: invalidate all cached meta-data.
346 */
347 devvp = VFSTOUFS(mountp)->um_devvp;
348 if (vinvalbuf(devvp, 0, cred, p, 0, 0))
349 panic("ffs_reload: dirty1");
350 /*
351 * Step 2: re-read superblock from disk.
352 */
353 VOP_DEVBLOCKSIZE(devvp,&size);
354
355 if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) {
356 brelse(bp);
357 return (error);
358 }
359 newfs = (struct fs *)bp->b_data;
360 #if REV_ENDIAN_FS
361 if (rev_endian) {
362 byte_swap_sbin(newfs);
363 }
364 #endif /* REV_ENDIAN_FS */
365 if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
366 newfs->fs_bsize < sizeof(struct fs)) {
367 #if REV_ENDIAN_FS
368 if (rev_endian)
369 byte_swap_sbout(newfs);
370 #endif /* REV_ENDIAN_FS */
371
372 brelse(bp);
373 return (EIO); /* XXX needs translation */
374 }
375 fs = VFSTOUFS(mountp)->um_fs;
376 /*
377 * Copy pointer fields back into superblock before copying in XXX
378 * new superblock. These should really be in the ufsmount. XXX
379 * Note that important parameters (eg fs_ncg) are unchanged.
380 */
381 newfs->fs_csp = fs->fs_csp;
382 newfs->fs_maxcluster = fs->fs_maxcluster;
383 bcopy(newfs, fs, (u_int)fs->fs_sbsize);
384 if (fs->fs_sbsize < SBSIZE)
385 bp->b_flags |= B_INVAL;
386 #if REV_ENDIAN_FS
387 if (rev_endian)
388 byte_swap_sbout(newfs);
389 #endif /* REV_ENDIAN_FS */
390 brelse(bp);
391 mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
392 ffs_oldfscompat(fs);
393 maxfilesize = (u_int64_t)0x100000000; /* 4GB */
394 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
395 fs->fs_maxfilesize = maxfilesize; /* XXX */
396 /*
397 * Step 3: re-read summary information from disk.
398 */
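        /*
         * The summary area is fs_cssize bytes starting at fragment
         * fs_csaddr; it is re-read below one fs_bsize block at a time,
         * with the final partial read sized in fragments.
         */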
399 blks = howmany(fs->fs_cssize, fs->fs_fsize);
400 space = fs->fs_csp;
401 for (i = 0; i < blks; i += fs->fs_frag) {
402 size = fs->fs_bsize;
403 if (i + fs->fs_frag > blks)
404 size = (blks - i) * fs->fs_fsize;
405 if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
406 NOCRED, &bp)) {
407 brelse(bp);
408 return (error);
409 }
410 #if REV_ENDIAN_FS
411 if (rev_endian) {
412 /* csum swaps */
413 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
414 }
415 #endif /* REV_ENDIAN_FS */
416 bcopy(bp->b_data, space, (u_int)size);
417 #if REV_ENDIAN_FS
418 if (rev_endian) {
419 /* csum swaps */
420 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
421 }
422 #endif /* REV_ENDIAN_FS */
423 brelse(bp);
424 }
425 /*
426 * We no longer know anything about clusters per cylinder group.
427 */
428 if (fs->fs_contigsumsize > 0) {
429 lp = fs->fs_maxcluster;
430 for (i = 0; i < fs->fs_ncg; i++)
431 *lp++ = fs->fs_contigsumsize;
432 }
433
434 loop:
435 simple_lock(&mntvnode_slock);
436 for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
437 if (vp->v_mount != mountp) {
438 simple_unlock(&mntvnode_slock);
439 goto loop;
440 }
441 nvp = vp->v_mntvnodes.le_next;
442 /*
443 * Step 4: invalidate all inactive vnodes.
444 */
445 if (vrecycle(vp, &mntvnode_slock, p))
446 goto loop;
447 /*
448 * Step 5: invalidate all cached file data.
449 */
450 simple_lock(&vp->v_interlock);
451 simple_unlock(&mntvnode_slock);
452 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
453 goto loop;
454 }
455 if (vinvalbuf(vp, 0, cred, p, 0, 0))
456 panic("ffs_reload: dirty2");
457 /*
458 * Step 6: re-read inode data for all active vnodes.
459 */
460 ip = VTOI(vp);
461 if (error =
462 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
463 (int)fs->fs_bsize, NOCRED, &bp)) {
464 brelse(bp);
465 vput(vp);
466 return (error);
467 }
468 #if REV_ENDIAN_FS
469 if (rev_endian) {
470 byte_swap_inode_in(((struct dinode *)bp->b_data +
471 ino_to_fsbo(fs, ip->i_number)), ip);
472 } else {
473 #endif /* REV_ENDIAN_FS */
474 ip->i_din = *((struct dinode *)bp->b_data +
475 ino_to_fsbo(fs, ip->i_number));
476 #if REV_ENDIAN_FS
477 }
478 #endif /* REV_ENDIAN_FS */
479 brelse(bp);
480 vput(vp);
481 simple_lock(&mntvnode_slock);
482 }
483 simple_unlock(&mntvnode_slock);
484 return (0);
485 }
486
487 /*
488 * Common code for mount and mountroot
489 */
490 int
491 ffs_mountfs(devvp, mp, p)
492 register struct vnode *devvp;
493 struct mount *mp;
494 struct proc *p;
495 {
496 register struct ufsmount *ump;
497 struct buf *bp;
498 register struct fs *fs;
499 dev_t dev;
500 struct buf *cgbp;
501 struct cg *cgp;
502 int32_t clustersumoff;
503 void *space;
504 int error, i, blks, size, ronly;
505 int32_t *lp;
506 struct ucred *cred;
507 extern struct vnode *rootvp;
508 u_int64_t maxfilesize; /* XXX */
509 u_int dbsize = DEV_BSIZE;
510 #if REV_ENDIAN_FS
511 int rev_endian=0;
512 #endif /* REV_ENDIAN_FS */
513 dev = devvp->v_rdev;
514 cred = p ? p->p_ucred : NOCRED;
515 /*
516 * Disallow multiple mounts of the same device.
517 * Disallow mounting of a device that is currently in use
518 * (except for root, which might share swap device for miniroot).
519 * Flush out any old buffers remaining from a previous use.
520 */
521 if (error = vfs_mountedon(devvp))
522 return (error);
523 if (vcount(devvp) > 1 && devvp != rootvp)
524 return (EBUSY);
525 if (error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0))
526 return (error);
527
528 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
529 if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
530 return (error);
531
532 VOP_DEVBLOCKSIZE(devvp,&size);
533
534 bp = NULL;
535 ump = NULL;
536 if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, cred, &bp))
537 goto out;
538 fs = (struct fs *)bp->b_data;
539 #if REV_ENDIAN_FS
540 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
541 fs->fs_bsize < sizeof(struct fs)) {
542 int magic = fs->fs_magic;
543
544 byte_swap_ints(&magic, 1);
545 if (magic != FS_MAGIC) {
546 error = EINVAL;
547 goto out;
548 }
549 byte_swap_sbin(fs);
550 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
551 fs->fs_bsize < sizeof(struct fs)) {
552 byte_swap_sbout(fs);
553 error = EINVAL; /* XXX needs translation */
554 goto out;
555 }
556 rev_endian=1;
557 }
558 #endif /* REV_ENDIAN_FS */
559 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
560 fs->fs_bsize < sizeof(struct fs)) {
561 #if REV_ENDIAN_FS
562 if (rev_endian)
563 byte_swap_sbout(fs);
564 #endif /* REV_ENDIAN_FS */
565 error = EINVAL; /* XXX needs translation */
566 goto out;
567 }
568
569
570         /*
571          * The buffer cache does not handle multiple pages in a buf when
572          * invalidating an incore buffer during pageout, and there are no
573          * locks in the pageout path.  So there is a danger of losing data
574          * when block allocation happens at the same time as a pageout of a
575          * buddy page: incore() returns a buf covering both pages, which
576          * leads vnode-pageout to incorrectly flush the entire buf.  Until
577          * the low-level ffs code is modified to deal with this, do not
578          * mount any file system with a block size larger than 4K.
579          */
580 /*
581 * Can't mount filesystems with a fragment size less than DIRBLKSIZ
582 */
583 /*
584 * Don't mount dirty filesystems, except for the root filesystem
585 */
586 if ((fs->fs_bsize > PAGE_SIZE) || (fs->fs_fsize < DIRBLKSIZ) ||
587 ((!(mp->mnt_flag & MNT_ROOTFS)) && (!fs->fs_clean))) {
588 #if REV_ENDIAN_FS
589 if (rev_endian)
590 byte_swap_sbout(fs);
591 #endif /* REV_ENDIAN_FS */
592 error = ENOTSUP;
593 goto out;
594 }
595
596         /* Figure out the device block size the file system was built for: */
597         /* the device block size = fragment size / number of sectors per fragment */
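        /* For example (illustrative numbers only): with 1024-byte fragments on a */
        /* device with 512-byte sectors, NSPF(fs) is 2, so dbsize = 1024/2 = 512.  */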
598
599 dbsize = fs->fs_fsize / NSPF(fs);
600 if(dbsize <= 0 ) {
601 kprintf("device blocksize computaion failed\n");
602 } else {
603 if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &dbsize, FWRITE, NOCRED,
604 p) != 0) {
605 kprintf("failed to set device blocksize\n");
606 }
607 /* force the specfs to reread blocksize from size() */
608 set_fsblocksize(devvp);
609 }
610
611 /* cache the IO attributes */
612 error = vfs_init_io_attributes(devvp, mp);
613 if (error) {
614 printf("ffs_mountfs: vfs_init_io_attributes returned %d\n",
615 error);
616 goto out;
617 }
618
619 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
620 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
621 #if REV_ENDIAN_FS
622 if (rev_endian)
623 byte_swap_sbout(fs);
624 #endif /* REV_ENDIAN_FS */
625 error = EROFS; /* needs translation */
626 goto out;
627 }
628
629         /* If we are not mounting read-only, check for an overlap
630          * condition in the cylinder group's free block map.
631          * If an overlap exists, force this into a read-only mount
632          * to avoid further corruption.  PR#2216969
633          */
634 if (ronly == 0){
635 if (error = bread (devvp, fsbtodb(fs, cgtod(fs, 0)),
636 (int)fs->fs_cgsize, NOCRED, &cgbp)) {
637 brelse(cgbp);
638 goto out;
639 }
640 cgp = (struct cg *)cgbp->b_data;
641 #if REV_ENDIAN_FS
642 if (rev_endian)
643 byte_swap_cgin(cgp,fs);
644 #endif /* REV_ENDIAN_FS */
645 if (!cg_chkmagic(cgp)){
646 #if REV_ENDIAN_FS
647 if (rev_endian)
648 byte_swap_cgout(cgp,fs);
649 #endif /* REV_ENDIAN_FS */
650 brelse(cgbp);
651 goto out;
652 }
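                /*
                 * cg_freeoff is where the free-block bitmap starts inside the
                 * cylinder group; the bitmap needs one bit per fragment, i.e.
                 * howmany(fs_cpg * fs_spc / NSPF(fs), NBBY) bytes.  The cluster
                 * summary must begin at or after the long-aligned end of that
                 * bitmap, so a smaller cg_clustersumoff indicates an overlap.
                 */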
653 if (cgp->cg_clustersumoff != 0) {
654 /* Check for overlap */
655 clustersumoff = cgp->cg_freeoff +
656 howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY);
657 clustersumoff = roundup(clustersumoff, sizeof(long));
658 if (cgp->cg_clustersumoff < clustersumoff) {
659 /* Overlap exists */
660 mp->mnt_flag |= MNT_RDONLY;
661 ronly = 1;
662 }
663 }
664 #if REV_ENDIAN_FS
665 if (rev_endian)
666 byte_swap_cgout(cgp,fs);
667 #endif /* REV_ENDIAN_FS */
668 brelse(cgbp);
669 }
670
671 ump = _MALLOC(sizeof *ump, M_UFSMNT, M_WAITOK);
672 bzero((caddr_t)ump, sizeof *ump);
673 ump->um_fs = _MALLOC((u_long)fs->fs_sbsize, M_UFSMNT,
674 M_WAITOK);
675 bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
676 if (fs->fs_sbsize < SBSIZE)
677 bp->b_flags |= B_INVAL;
678 #if REV_ENDIAN_FS
679 if (rev_endian)
680 byte_swap_sbout(fs);
681 #endif /* REV_ENDIAN_FS */
682 brelse(bp);
683 bp = NULL;
684 fs = ump->um_fs;
685 fs->fs_ronly = ronly;
686 size = fs->fs_cssize;
687 blks = howmany(size, fs->fs_fsize);
688 if (fs->fs_contigsumsize > 0)
689 size += fs->fs_ncg * sizeof(int32_t);
690 space = _MALLOC((u_long)size, M_UFSMNT, M_WAITOK);
691 fs->fs_csp = space;
692 for (i = 0; i < blks; i += fs->fs_frag) {
693 size = fs->fs_bsize;
694 if (i + fs->fs_frag > blks)
695 size = (blks - i) * fs->fs_fsize;
696 if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
697 cred, &bp)) {
698 _FREE(fs->fs_csp, M_UFSMNT);
699 goto out;
700 }
701 bcopy(bp->b_data, space, (u_int)size);
702 #if REV_ENDIAN_FS
703 if (rev_endian)
704 byte_swap_ints((int *) space, size / sizeof(int));
705 #endif /* REV_ENDIAN_FS */
706 space = (char *)space + size;
707 brelse(bp);
708 bp = NULL;
709 }
710 if (fs->fs_contigsumsize > 0) {
711 fs->fs_maxcluster = lp = space;
712 for (i = 0; i < fs->fs_ncg; i++)
713 *lp++ = fs->fs_contigsumsize;
714 }
715 mp->mnt_data = (qaddr_t)ump;
716 mp->mnt_stat.f_fsid.val[0] = (long)dev;
717 mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
718 #warning hardcoded max symlen and not "mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;"
719 mp->mnt_maxsymlinklen = 60;
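        /*
         * 60 bytes is presumably the room for a "fast" symlink stored directly
         * in the on-disk inode: (NDADDR + NIADDR) = 15 block pointers of 4
         * bytes each.
         */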
720 #if REV_ENDIAN_FS
721 if (rev_endian)
722 mp->mnt_flag |= MNT_REVEND;
723 #endif /* REV_ENDIAN_FS */
724 ump->um_mountp = mp;
725 ump->um_dev = dev;
726 ump->um_devvp = devvp;
727 ump->um_nindir = fs->fs_nindir;
728 ump->um_bptrtodb = fs->fs_fsbtodb;
729 ump->um_seqinc = fs->fs_frag;
730 for (i = 0; i < MAXQUOTAS; i++)
731 ump->um_qfiles[i].qf_vp = NULLVP;
732 devvp->v_specflags |= SI_MOUNTEDON;
733 ffs_oldfscompat(fs);
734 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
735 maxfilesize = (u_int64_t)0x100000000; /* 4GB */
736 #if 0
737 maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */
738 #endif /* 0 */
739 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
740 fs->fs_maxfilesize = maxfilesize; /* XXX */
741 if (ronly == 0) {
742 fs->fs_clean = 0;
743 (void) ffs_sbupdate(ump, MNT_WAIT);
744 }
745 return (0);
746 out:
747 if (bp)
748 brelse(bp);
749 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
750 if (ump) {
751 _FREE(ump->um_fs, M_UFSMNT);
752 _FREE(ump, M_UFSMNT);
753 mp->mnt_data = (qaddr_t)0;
754 }
755 return (error);
756 }
757
758 /*
759 * Sanity checks for old file systems.
760 *
761 * XXX - goes away some day.
762 */
763 ffs_oldfscompat(fs)
764 struct fs *fs;
765 {
766 int i;
767
768 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */
769 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */
770 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
771 fs->fs_nrpos = 8; /* XXX */
772 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
773 u_int64_t sizepb = fs->fs_bsize; /* XXX */
774 /* XXX */
775 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
776 for (i = 0; i < NIADDR; i++) { /* XXX */
777 sizepb *= NINDIR(fs); /* XXX */
778 fs->fs_maxfilesize += sizepb; /* XXX */
779 } /* XXX */
780 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
781 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
782 } /* XXX */
783 return (0);
784 }
785
786 /*
787 * unmount system call
788 */
789 int
790 ffs_unmount(mp, mntflags, p)
791 struct mount *mp;
792 int mntflags;
793 struct proc *p;
794 {
795 register struct ufsmount *ump;
796 register struct fs *fs;
797 int error, flags;
798 int force;
799
800 flags = 0;
801 force = 0;
802 if (mntflags & MNT_FORCE) {
803 flags |= FORCECLOSE;
804 force = 1;
805 }
806 if ( (error = ffs_flushfiles(mp, flags, p)) && !force )
807 return (error);
808 ump = VFSTOUFS(mp);
809 fs = ump->um_fs;
810 if (fs->fs_ronly == 0) {
811 fs->fs_clean = 1;
812 if (error = ffs_sbupdate(ump, MNT_WAIT)) {
813 fs->fs_clean = 0;
814 #ifdef notyet
815                         /* We can at least clean up, since the media could be write-protected */
816                         /* and during mount we do not check for write failures. */
817                         /* FIXME LATER: the correct fix would be to have mount detect */
818                         /* write-protected media and downgrade to a read-only mount. */
819                         /* For now, ignore the error and continue the unmount. */
820 return (error);
821 #endif /* notyet */
822 }
823 }
824 ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
825 error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
826 NOCRED, p);
827 if (error && !force)
828 return (error);
829 vrele(ump->um_devvp);
830
831 _FREE(fs->fs_csp, M_UFSMNT);
832 _FREE(fs, M_UFSMNT);
833 _FREE(ump, M_UFSMNT);
834 mp->mnt_data = (qaddr_t)0;
835 #if REV_ENDIAN_FS
836 mp->mnt_flag &= ~MNT_REVEND;
837 #endif /* REV_ENDIAN_FS */
838 return (0);
839 }
840
841 /*
842 * Flush out all the files in a filesystem.
843 */
844 ffs_flushfiles(mp, flags, p)
845 register struct mount *mp;
846 int flags;
847 struct proc *p;
848 {
849 register struct ufsmount *ump;
850 int i, error;
851
852 ump = VFSTOUFS(mp);
853 #if QUOTA
854 if (mp->mnt_flag & MNT_QUOTA) {
855 if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
856 return (error);
857 for (i = 0; i < MAXQUOTAS; i++) {
858 if (ump->um_qfiles[i].qf_vp == NULLVP)
859 continue;
860 quotaoff(p, mp, i);
861 }
862 /*
863 * Here we fall through to vflush again to ensure
864 * that we have gotten rid of all the system vnodes.
865 */
866 }
867 #endif
868 error = vflush(mp, NULLVP, SKIPSWAP|flags);
869 error = vflush(mp, NULLVP, flags);
870 return (error);
871 }
872
873 /*
874 * Get file system statistics.
875 */
876 int
877 ffs_statfs(mp, sbp, p)
878 struct mount *mp;
879 register struct statfs *sbp;
880 struct proc *p;
881 {
882 register struct ufsmount *ump;
883 register struct fs *fs;
884
885 ump = VFSTOUFS(mp);
886 fs = ump->um_fs;
887 if (fs->fs_magic != FS_MAGIC)
888 panic("ffs_statfs");
889 sbp->f_bsize = fs->fs_fsize;
890 sbp->f_iosize = fs->fs_bsize;
891 sbp->f_blocks = fs->fs_dsize;
892 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
893 fs->fs_cstotal.cs_nffree;
894 sbp->f_bavail = freespace(fs, fs->fs_minfree);
895 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
896 sbp->f_ffree = fs->fs_cstotal.cs_nifree;
897 if (sbp != &mp->mnt_stat) {
898 sbp->f_type = mp->mnt_vfc->vfc_typenum;
899 bcopy((caddr_t)mp->mnt_stat.f_mntonname,
900 (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
901 bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
902 (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
903 }
904 return (0);
905 }
906
907 /*
908 * Go through the disk queues to initiate sandbagged IO;
909 * go through the inodes to write those that have been modified;
910 * initiate the writing of the super block if it has been modified.
911 *
912 * Note: we are always called with the filesystem marked `MPBUSY'.
913 */
914 int
915 ffs_sync(mp, waitfor, cred, p)
916 struct mount *mp;
917 int waitfor;
918 struct ucred *cred;
919 struct proc *p;
920 {
921 struct vnode *nvp, *vp;
922 struct inode *ip;
923 struct ufsmount *ump = VFSTOUFS(mp);
924 struct fs *fs;
925 int error, allerror = 0;
926
927 fs = ump->um_fs;
928 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
929 printf("fs = %s\n", fs->fs_fsmnt);
930 panic("update: rofs mod");
931 }
932 /*
933 * Write back each (modified) inode.
934 */
935 simple_lock(&mntvnode_slock);
936 loop:
937 for (vp = mp->mnt_vnodelist.lh_first;
938 vp != NULL;
939 vp = nvp) {
940 int didhold = 0;
941
942 /*
943 * If the vnode that we are about to sync is no longer
944 * associated with this mount point, start over.
945 */
946 if (vp->v_mount != mp)
947 goto loop;
948 simple_lock(&vp->v_interlock);
949 nvp = vp->v_mntvnodes.le_next;
950 ip = VTOI(vp);
951 if ((vp->v_type == VNON) ||
952 ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
953 vp->v_dirtyblkhd.lh_first == NULL && !(vp->v_flag & VHASDIRTY))) {
954 simple_unlock(&vp->v_interlock);
955 continue;
956 }
957 simple_unlock(&mntvnode_slock);
958 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
959 if (error) {
960 simple_lock(&mntvnode_slock);
961 if (error == ENOENT)
962 goto loop;
963 continue;
964 }
965 didhold = ubc_hold(vp);
966 if (error = VOP_FSYNC(vp, cred, waitfor, p))
967 allerror = error;
968 VOP_UNLOCK(vp, 0, p);
969 if (didhold)
970 ubc_rele(vp);
971 vrele(vp);
972 simple_lock(&mntvnode_slock);
973 }
974 simple_unlock(&mntvnode_slock);
975 /*
976 * Force stale file system control information to be flushed.
977 */
978 if (error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p))
979 allerror = error;
980 #if QUOTA
981 qsync(mp);
982 #endif
983 /*
984 * Write back modified superblock.
985 */
986 if (fs->fs_fmod != 0) {
987 fs->fs_fmod = 0;
988 fs->fs_time = time.tv_sec;
989 if (error = ffs_sbupdate(ump, waitfor))
990 allerror = error;
991 }
992 return (allerror);
993 }
994
995 /*
996 * Look up a FFS dinode number to find its incore vnode, otherwise read it
997 * in from disk. If it is in core, wait for the lock bit to clear, then
998 * return the inode locked. Detection and handling of mount points must be
999 * done by the calling routine.
1000 */
1001 int
1002 ffs_vget(mp, ino, vpp)
1003 struct mount *mp;
1004 ino_t ino;
1005 struct vnode **vpp;
1006 {
1007 struct proc *p = current_proc(); /* XXX */
1008 struct fs *fs;
1009 struct inode *ip;
1010 struct ufsmount *ump;
1011 struct buf *bp;
1012 struct vnode *vp;
1013 dev_t dev;
1014 int i, type, error;
1015
1016 ump = VFSTOUFS(mp);
1017 dev = ump->um_dev;
1018
1019 /* Check for unmount in progress */
1020 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
1021 *vpp = NULL;
1022 return (EPERM);
1023 }
1024
1025 if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1026 vp = *vpp;
1027 UBCINFOCHECK("ffs_vget", vp);
1028 return (0);
1029 }
1030 /* Allocate a new vnode/inode. */
1031 type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
1032 MALLOC_ZONE(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
1033 bzero((caddr_t)ip, sizeof(struct inode));
1034 lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
1035 /* lock the inode */
1036 lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p);
1037
1038 ip->i_fs = fs = ump->um_fs;
1039 ip->i_dev = dev;
1040 ip->i_number = ino;
1041 ip->i_flag |= IN_ALLOC;
1042 #if QUOTA
1043 for (i = 0; i < MAXQUOTAS; i++)
1044 ip->i_dquot[i] = NODQUOT;
1045 #endif
1046
1047         /*
1048          * MALLOC_ZONE is a blocking call; re-check the hash in case another thread created this inode while we slept.
1049          */
1050 if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1051 /* Clean up */
1052 FREE_ZONE(ip, sizeof(struct inode), type);
1053 vp = *vpp;
1054 UBCINFOCHECK("ffs_vget", vp);
1055 return (0);
1056 }
1057
1058 /*
1059 * Put it onto its hash chain locked so that other requests for
1060 * this inode will block if they arrive while we are sleeping waiting
1061 * for old data structures to be purged or for the contents of the
1062 * disk portion of this inode to be read.
1063 */
1064 ufs_ihashins(ip);
1065
1066 if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
1067 ufs_ihashrem(ip);
1068 if (ISSET(ip->i_flag, IN_WALLOC))
1069 wakeup(ip);
1070 FREE_ZONE(ip, sizeof(struct inode), type);
1071 *vpp = NULL;
1072 return (error);
1073 }
1074 vp->v_data = ip;
1075 ip->i_vnode = vp;
1076
1077 /*
1078 * A vnode is associated with the inode now,
1079 * vget() can deal with the serialization.
1080 */
1081 CLR(ip->i_flag, IN_ALLOC);
1082 if (ISSET(ip->i_flag, IN_WALLOC))
1083 wakeup(ip);
1084
1085 /* Read in the disk contents for the inode, copy into the inode. */
1086 if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1087 (int)fs->fs_bsize, NOCRED, &bp)) {
1088 /*
1089 * The inode does not contain anything useful, so it would
1090 * be misleading to leave it on its hash chain. With mode
1091 * still zero, it will be unlinked and returned to the free
1092 * list by vput().
1093 */
1094 vput(vp);
1095 brelse(bp);
1096 *vpp = NULL;
1097 return (error);
1098 }
1099 #if REV_ENDIAN_FS
1100 if (mp->mnt_flag & MNT_REVEND) {
1101 byte_swap_inode_in(((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)),ip);
1102 } else {
1103 #endif /* REV_ENDIAN_FS */
1104 ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
1105 #if REV_ENDIAN_FS
1106 }
1107 #endif /* REV_ENDIAN_FS */
1108 brelse(bp);
1109
1110 /*
1111 * Initialize the vnode from the inode, check for aliases.
1112 * Note that the underlying vnode may have changed.
1113 */
1114 if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
1115 vput(vp);
1116 *vpp = NULL;
1117 return (error);
1118 }
1119 /*
1120 * Finish inode initialization now that aliasing has been resolved.
1121 */
1122 ip->i_devvp = ump->um_devvp;
1123 VREF(ip->i_devvp);
1124 /*
1125 * Set up a generation number for this inode if it does not
1126 * already have one. This should only happen on old filesystems.
1127 */
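        /*
         * The generation number is copied into file handles (see ffs_vptofh),
         * so that a handle can later be recognized as stale once the inode
         * has been reused.
         */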
1128 if (ip->i_gen == 0) {
1129 if (++nextgennumber < (u_long)time.tv_sec)
1130 nextgennumber = time.tv_sec;
1131 ip->i_gen = nextgennumber;
1132 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1133 ip->i_flag |= IN_MODIFIED;
1134 }
1135 /*
1136 * Ensure that uid and gid are correct. This is a temporary
1137 * fix until fsck has been changed to do the update.
1138 */
1139 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1140 ip->i_uid = ip->i_din.di_ouid; /* XXX */
1141 ip->i_gid = ip->i_din.di_ogid; /* XXX */
1142 } /* XXX */
1143
1144 *vpp = vp;
1145 if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp))
1146 ubc_info_init(vp);
1147 return (0);
1148 }
1149
1150 /*
1151 * File handle to vnode
1152 *
1153 * Have to be really careful about stale file handles:
1154 * - check that the inode number is valid
1155 * - call ffs_vget() to get the locked inode
1156 * - check for an unallocated inode (i_mode == 0)
1157 * - check that the given client host has export rights and return
1158  *   those rights via exflagsp and credanonp
1159 */
1160 int
1161 ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
1162 register struct mount *mp;
1163 struct fid *fhp;
1164 struct mbuf *nam;
1165 struct vnode **vpp;
1166 int *exflagsp;
1167 struct ucred **credanonp;
1168 {
1169 register struct ufid *ufhp;
1170 struct fs *fs;
1171
1172 ufhp = (struct ufid *)fhp;
1173 fs = VFSTOUFS(mp)->um_fs;
1174 if (ufhp->ufid_ino < ROOTINO ||
1175 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1176 return (ESTALE);
1177 return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
1178 }
1179
1180 /*
1181 * Vnode pointer to File handle
1182 */
1183 /* ARGSUSED */
1184 ffs_vptofh(vp, fhp)
1185 struct vnode *vp;
1186 struct fid *fhp;
1187 {
1188 register struct inode *ip;
1189 register struct ufid *ufhp;
1190
1191 ip = VTOI(vp);
1192 ufhp = (struct ufid *)fhp;
1193 ufhp->ufid_len = sizeof(struct ufid);
1194 ufhp->ufid_ino = ip->i_number;
1195 ufhp->ufid_gen = ip->i_gen;
1196 return (0);
1197 }
1198
1199 /*
1200 * Initialize the filesystem; just use ufs_init.
1201 */
1202 int
1203 ffs_init(vfsp)
1204 struct vfsconf *vfsp;
1205 {
1206
1207 return (ufs_init(vfsp));
1208 }
1209
1210 /*
1211 * fast filesystem related variables.
1212 */
1213 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1214 int *name;
1215 u_int namelen;
1216 void *oldp;
1217 size_t *oldlenp;
1218 void *newp;
1219 size_t newlen;
1220 struct proc *p;
1221 {
1222 extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
1223
1224 /* all sysctl names at this level are terminal */
1225 if (namelen != 1)
1226 return (ENOTDIR); /* overloaded */
1227
1228 switch (name[0]) {
1229 case FFS_CLUSTERREAD:
1230 return (sysctl_int(oldp, oldlenp, newp, newlen,
1231 &doclusterread));
1232 case FFS_CLUSTERWRITE:
1233 return (sysctl_int(oldp, oldlenp, newp, newlen,
1234 &doclusterwrite));
1235 case FFS_REALLOCBLKS:
1236 return (sysctl_int(oldp, oldlenp, newp, newlen,
1237 &doreallocblks));
1238 case FFS_ASYNCFREE:
1239 return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
1240 default:
1241 return (EOPNOTSUPP);
1242 }
1243 /* NOTREACHED */
1244 }
1245
1246 /*
1247 * Write a superblock and associated information back to disk.
1248 */
1249 int
1250 ffs_sbupdate(mp, waitfor)
1251 struct ufsmount *mp;
1252 int waitfor;
1253 {
1254 register struct fs *dfs, *fs = mp->um_fs;
1255 register struct buf *bp;
1256 int blks;
1257 void *space;
1258 int i, size, error, allerror = 0;
1259 int devBlockSize=0;
1260 #if REV_ENDIAN_FS
1261 int rev_endian=(mp->um_mountp->mnt_flag & MNT_REVEND);
1262 #endif /* REV_ENDIAN_FS */
1263
1264 /*
1265 * First write back the summary information.
1266 */
1267 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1268 space = fs->fs_csp;
1269 for (i = 0; i < blks; i += fs->fs_frag) {
1270 size = fs->fs_bsize;
1271 if (i + fs->fs_frag > blks)
1272 size = (blks - i) * fs->fs_fsize;
1273 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1274 size, 0, 0, BLK_META);
1275 bcopy(space, bp->b_data, (u_int)size);
1276 #if REV_ENDIAN_FS
1277 if (rev_endian) {
1278 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
1279 }
1280 #endif /* REV_ENDIAN_FS */
1281 space = (char *)space + size;
1282 if (waitfor != MNT_WAIT)
1283 bawrite(bp);
1284 else if (error = bwrite(bp))
1285 allerror = error;
1286 }
1287 /*
1288 * Now write back the superblock itself. If any errors occurred
1289 * up to this point, then fail so that the superblock avoids
1290 * being written out as clean.
1291 */
1292 if (allerror)
1293 return (allerror);
1294 VOP_DEVBLOCKSIZE(mp->um_devvp,&devBlockSize);
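        /*
         * The superblock lives at byte offset SBOFF on the device; dividing
         * by the device block size yields the block number for getblk().
         */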
1295 bp = getblk(mp->um_devvp, (SBOFF/devBlockSize), (int)fs->fs_sbsize, 0, 0, BLK_META);
1296 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1297 /* Restore compatibility to old file systems. XXX */
1298 dfs = (struct fs *)bp->b_data; /* XXX */
1299 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
1300 dfs->fs_nrpos = -1; /* XXX */
1301 #if REV_ENDIAN_FS
1302         /*
1303          * Swap bytes here so that, in the
1304          * inode format < FS_44INODEFMT case below,
1305          * the appropriate fields get moved into place.
1306          */
1307 if (rev_endian) {
1308 byte_swap_sbout((struct fs *)bp->b_data);
1309 }
1310 #endif /* REV_ENDIAN_FS */
1311 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1312 int32_t *lp, tmp; /* XXX */
1313 /* XXX */
1314 lp = (int32_t *)&dfs->fs_qbmask; /* XXX */
1315 tmp = lp[4]; /* XXX */
1316 for (i = 4; i > 0; i--) /* XXX */
1317 lp[i] = lp[i-1]; /* XXX */
1318 lp[0] = tmp; /* XXX */
1319 } /* XXX */
1320 #if REV_ENDIAN_FS
1321         /* Note that dfs is already byte-swapped, so swap the saved
1322          * max file size before writing
1323          */
1324 if (rev_endian) {
1325 dfs->fs_maxfilesize = NXSwapLongLong(mp->um_savedmaxfilesize); /* XXX */
1326 } else {
1327 #endif /* REV_ENDIAN_FS */
1328 dfs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
1329 #if REV_ENDIAN_FS
1330 }
1331 #endif /* REV_ENDIAN_FS */
1332 if (waitfor != MNT_WAIT)
1333 bawrite(bp);
1334 else if (error = bwrite(bp))
1335 allerror = error;
1336
1337 return (allerror);
1338 }