[apple/xnu.git] bsd/ufs/ffs/ffs_vfsops.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1989, 1991, 1993, 1994
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
56 */
57
58 #include <rev_endian_fs.h>
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/namei.h>
62 #include <sys/proc.h>
63 #include <sys/kernel.h>
64 #include <sys/vnode.h>
65 #include <sys/socket.h>
66 #include <sys/mount.h>
67 #include <sys/buf.h>
68 #include <sys/mbuf.h>
69 #include <sys/file.h>
70 #include <dev/disk.h>
71 #include <sys/ioctl.h>
72 #include <sys/errno.h>
73 #include <sys/malloc.h>
74 #include <sys/ubc.h>
75
76 #include <miscfs/specfs/specdev.h>
77
78 #include <ufs/ufs/quota.h>
79 #include <ufs/ufs/ufsmount.h>
80 #include <ufs/ufs/inode.h>
81 #include <ufs/ufs/ufs_extern.h>
82
83 #include <ufs/ffs/fs.h>
84 #include <ufs/ffs/ffs_extern.h>
85 #if REV_ENDIAN_FS
86 #include <ufs/ufs/ufs_byte_order.h>
87 #include <architecture/byte_order.h>
88 #endif /* REV_ENDIAN_FS */
89
90 int ffs_sbupdate __P((struct ufsmount *, int));
91
92 struct vfsops ufs_vfsops = {
93 ffs_mount,
94 ufs_start,
95 ffs_unmount,
96 ufs_root,
97 ufs_quotactl,
98 ffs_statfs,
99 ffs_sync,
100 ffs_vget,
101 ffs_fhtovp,
102 ffs_vptofh,
103 ffs_init,
104 ffs_sysctl,
105 };
106
107 extern u_long nextgennumber;
108
109 /*
110 * Called by main() when ufs is going to be mounted as root.
111 */
112 ffs_mountroot()
113 {
114 extern struct vnode *rootvp;
115 struct fs *fs;
116 struct mount *mp;
117 struct proc *p = current_proc(); /* XXX */
118 struct ufsmount *ump;
119 u_int size;
120 int error;
121
122 /*
123 * Get vnode for rootdev.
124 */
125 if (error = bdevvp(rootdev, &rootvp)) {
126 printf("ffs_mountroot: can't setup bdevvp");
127 return (error);
128 }
129 if (error = vfs_rootmountalloc("ufs", "root_device", &mp))
130 return (error);
131
132 /* Must set the MNT_ROOTFS flag before doing the actual mount */
133 mp->mnt_flag |= MNT_ROOTFS;
134
135 if (error = ffs_mountfs(rootvp, mp, p)) {
136 mp->mnt_vfc->vfc_refcount--;
137 vfs_unbusy(mp, p);
138 _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT);
139 return (error);
140 }
141 simple_lock(&mountlist_slock);
142 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
143 simple_unlock(&mountlist_slock);
144 ump = VFSTOUFS(mp);
145 fs = ump->um_fs;
146 (void) copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
147 (void)ffs_statfs(mp, &mp->mnt_stat, p);
148 vfs_unbusy(mp, p);
149 inittodr(fs->fs_time);
150 return (0);
151 }
152
153 /*
154 * VFS Operations.
155 *
156 * mount system call
157 */
158 int
159 ffs_mount(mp, path, data, ndp, p)
160 register struct mount *mp;
161 char *path;
162 caddr_t data;
163 struct nameidata *ndp;
164 struct proc *p;
165 {
166 struct vnode *devvp;
167 struct ufs_args args;
168 struct ufsmount *ump;
169 register struct fs *fs;
170 u_int size;
171 int error, flags;
172 mode_t accessmode;
173 int ronly;
174 int reload = 0;
175
176 if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
177 return (error);
178 /*
179 * If updating, check whether changing from read-only to
180 * read/write; if there is no device name, that's all we do.
181 */
182 if (mp->mnt_flag & MNT_UPDATE) {
183 ump = VFSTOUFS(mp);
184 fs = ump->um_fs;
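/* Changing from read-write to read-only: flush files, mark the file system clean, and write back the superblock. */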
185 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
186 flags = WRITECLOSE;
187 if (mp->mnt_flag & MNT_FORCE)
188 flags |= FORCECLOSE;
189 if (error = ffs_flushfiles(mp, flags, p))
190 return (error);
191 fs->fs_clean = 1;
192 fs->fs_ronly = 1;
193 if (error = ffs_sbupdate(ump, MNT_WAIT)) {
194 fs->fs_clean = 0;
195 fs->fs_ronly = 0;
196 return (error);
197 }
198 }
199 /* save fs_ronly for later use */
200 ronly = fs->fs_ronly;
201 if ((mp->mnt_flag & MNT_RELOAD) || ronly)
202 reload = 1;
203 if ((reload) &&
204 (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)))
205 return (error);
206 /* restore fs_ronly after the reload */
207 fs->fs_ronly = ronly;
208 /*
209 * Do not update the file system if the user booted single-user
210 * and is now trying to mount -uw without first running fsck
211 */
212 if (!fs->fs_clean && ronly) {
213 printf("WARNING: trying to mount a dirty file system\n");
214 if (issingleuser() && (mp->mnt_flag & MNT_ROOTFS)) {
215 printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",fs->fs_fsmnt);
216 /*
217 * Reset the readonly bit as reload might have
218 * modified this bit
219 */
220 fs->fs_ronly = 1;
221 return(EPERM);
222 }
223 }
224
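/* Upgrading from read-only to read-write: after the access check, mark the file system dirty and write back the superblock. */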
225 if (ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
226 /*
227 * If upgrade to read-write by non-root, then verify
228 * that user has necessary permissions on the device.
229 */
230 if (p->p_ucred->cr_uid != 0) {
231 devvp = ump->um_devvp;
232 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
233 if (error = VOP_ACCESS(devvp, VREAD | VWRITE,
234 p->p_ucred, p)) {
235 VOP_UNLOCK(devvp, 0, p);
236 return (error);
237 }
238 VOP_UNLOCK(devvp, 0, p);
239 }
240 fs->fs_ronly = 0;
241 fs->fs_clean = 0;
242 (void) ffs_sbupdate(ump, MNT_WAIT);
243 }
244 if (args.fspec == 0) {
245 /*
246 * Process export requests.
247 */
248 return (vfs_export(mp, &ump->um_export, &args.export));
249 }
250 }
251 /*
252 * Not an update, or updating the name: look up the name
253 * and verify that it refers to a sensible block device.
254 */
255 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
256 if (error = namei(ndp))
257 return (error);
258 devvp = ndp->ni_vp;
259
260 if (devvp->v_type != VBLK) {
261 vrele(devvp);
262 return (ENOTBLK);
263 }
264 if (major(devvp->v_rdev) >= nblkdev) {
265 vrele(devvp);
266 return (ENXIO);
267 }
268 /*
269 * If mount by non-root, then verify that user has necessary
270 * permissions on the device.
271 */
272 if (p->p_ucred->cr_uid != 0) {
273 accessmode = VREAD;
274 if ((mp->mnt_flag & MNT_RDONLY) == 0)
275 accessmode |= VWRITE;
276 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
277 if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
278 vput(devvp);
279 return (error);
280 }
281 VOP_UNLOCK(devvp, 0, p);
282 }
283 if ((mp->mnt_flag & MNT_UPDATE) == 0)
284 error = ffs_mountfs(devvp, mp, p);
285 else {
286 if (devvp != ump->um_devvp)
287 error = EINVAL; /* needs translation */
288 else
289 vrele(devvp);
290 }
291 if (error) {
292 vrele(devvp);
293 return (error);
294 }
295 ump = VFSTOUFS(mp);
296 fs = ump->um_fs;
297 (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
298 bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
299 bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
300 MNAMELEN);
301 (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
302 &size);
303 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
304 (void)ffs_statfs(mp, &mp->mnt_stat, p);
305 return (0);
306 }
307
308 /*
309 * Reload all incore data for a filesystem (used after running fsck on
310 * the root filesystem and finding things to fix). The filesystem must
311 * be mounted read-only.
312 *
313 * Things to do to update the mount:
314 * 1) invalidate all cached meta-data.
315 * 2) re-read superblock from disk.
316 * 3) re-read summary information from disk.
317 * 4) invalidate all inactive vnodes.
318 * 5) invalidate all cached file data.
319 * 6) re-read inode data for all active vnodes.
320 */
321 ffs_reload(mountp, cred, p)
322 register struct mount *mountp;
323 struct ucred *cred;
324 struct proc *p;
325 {
326 register struct vnode *vp, *nvp, *devvp;
327 struct inode *ip;
328 struct csum *space;
329 struct buf *bp;
330 struct fs *fs, *newfs;
331 int i, blks, size, error;
332 u_int64_t maxfilesize; /* XXX */
333 int32_t *lp;
334 #if REV_ENDIAN_FS
335 int rev_endian = (mountp->mnt_flag & MNT_REVEND);
336 #endif /* REV_ENDIAN_FS */
337
338 if ((mountp->mnt_flag & MNT_RDONLY) == 0)
339 return (EINVAL);
340 /*
341 * Step 1: invalidate all cached meta-data.
342 */
343 devvp = VFSTOUFS(mountp)->um_devvp;
344 if (vinvalbuf(devvp, 0, cred, p, 0, 0))
345 panic("ffs_reload: dirty1");
346 /*
347 * Step 2: re-read superblock from disk.
348 */
349 VOP_DEVBLOCKSIZE(devvp,&size);
350
351 if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) {
352 brelse(bp);
353 return (error);
354 }
355 newfs = (struct fs *)bp->b_data;
356 #if REV_ENDIAN_FS
357 if (rev_endian) {
358 byte_swap_sbin(newfs);
359 }
360 #endif /* REV_ENDIAN_FS */
361 if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
362 newfs->fs_bsize < sizeof(struct fs)) {
363 #if REV_ENDIAN_FS
364 if (rev_endian)
365 byte_swap_sbout(newfs);
366 #endif /* REV_ENDIAN_FS */
367
368 brelse(bp);
369 return (EIO); /* XXX needs translation */
370 }
371 fs = VFSTOUFS(mountp)->um_fs;
372 /*
373 * Copy pointer fields back into superblock before copying in XXX
374 * new superblock. These should really be in the ufsmount. XXX
375 * Note that important parameters (eg fs_ncg) are unchanged.
376 */
377 bcopy(&fs->fs_csp[0], &newfs->fs_csp[0], sizeof(fs->fs_csp));
378 newfs->fs_maxcluster = fs->fs_maxcluster;
379 bcopy(newfs, fs, (u_int)fs->fs_sbsize);
380 if (fs->fs_sbsize < SBSIZE)
381 bp->b_flags |= B_INVAL;
382 #if REV_ENDIAN_FS
383 if (rev_endian)
384 byte_swap_sbout(newfs);
385 #endif /* REV_ENDIAN_FS */
386 brelse(bp);
387 mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
388 ffs_oldfscompat(fs);
389 maxfilesize = (u_int64_t)0x100000000; /* 4GB */
390 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
391 fs->fs_maxfilesize = maxfilesize; /* XXX */
392 /*
393 * Step 3: re-read summary information from disk.
394 */
395 blks = howmany(fs->fs_cssize, fs->fs_fsize);
396 space = fs->fs_csp[0];
397 for (i = 0; i < blks; i += fs->fs_frag) {
398 size = fs->fs_bsize;
399 if (i + fs->fs_frag > blks)
400 size = (blks - i) * fs->fs_fsize;
401 if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
402 NOCRED, &bp)) {
403 brelse(bp);
404 return (error);
405 }
406 #if REV_ENDIAN_FS
407 if (rev_endian) {
408 /* csum swaps */
409 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
410 }
411 #endif /* REV_ENDIAN_FS */
412 bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size);
413 #if REV_ENDIAN_FS
414 if (rev_endian) {
415 /* csum swaps */
416 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
417 }
418 #endif /* REV_ENDIAN_FS */
419 brelse(bp);
420 }
421 /*
422 * We no longer know anything about clusters per cylinder group.
423 */
424 if (fs->fs_contigsumsize > 0) {
425 lp = fs->fs_maxcluster;
426 for (i = 0; i < fs->fs_ncg; i++)
427 *lp++ = fs->fs_contigsumsize;
428 }
429
430 loop:
431 simple_lock(&mntvnode_slock);
432 for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
433 if (vp->v_mount != mountp) {
434 simple_unlock(&mntvnode_slock);
435 goto loop;
436 }
437 nvp = vp->v_mntvnodes.le_next;
438 /*
439 * Step 4: invalidate all inactive vnodes.
440 */
441 if (vrecycle(vp, &mntvnode_slock, p))
442 goto loop;
443 /*
444 * Step 5: invalidate all cached file data.
445 */
446 simple_lock(&vp->v_interlock);
447 simple_unlock(&mntvnode_slock);
448 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
449 goto loop;
450 }
451 if (vinvalbuf(vp, 0, cred, p, 0, 0))
452 panic("ffs_reload: dirty2");
453 /*
454 * Step 6: re-read inode data for all active vnodes.
455 */
456 ip = VTOI(vp);
457 if (error =
458 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
459 (int)fs->fs_bsize, NOCRED, &bp)) {
460 brelse(bp);
461 vput(vp);
462 return (error);
463 }
464 #if REV_ENDIAN_FS
465 if (rev_endian) {
466 byte_swap_inode_in(((struct dinode *)bp->b_data +
467 ino_to_fsbo(fs, ip->i_number)), ip);
468 } else {
469 #endif /* REV_ENDIAN_FS */
470 ip->i_din = *((struct dinode *)bp->b_data +
471 ino_to_fsbo(fs, ip->i_number));
472 #if REV_ENDIAN_FS
473 }
474 #endif /* REV_ENDIAN_FS */
475 brelse(bp);
476 vput(vp);
477 simple_lock(&mntvnode_slock);
478 }
479 simple_unlock(&mntvnode_slock);
480 return (0);
481 }
482
483 /*
484 * Common code for mount and mountroot
485 */
486 int
487 ffs_mountfs(devvp, mp, p)
488 register struct vnode *devvp;
489 struct mount *mp;
490 struct proc *p;
491 {
492 register struct ufsmount *ump;
493 struct buf *bp;
494 register struct fs *fs;
495 dev_t dev;
496 struct buf *cgbp;
497 struct cg *cgp;
498 int32_t clustersumoff;
499 caddr_t base, space;
500 int error, i, blks, size, ronly;
501 int32_t *lp;
502 struct ucred *cred;
503 extern struct vnode *rootvp;
504 u_int64_t maxfilesize; /* XXX */
505 u_int dbsize = DEV_BSIZE;
506 #if REV_ENDIAN_FS
507 int rev_endian=0;
508 #endif /* REV_ENDIAN_FS */
509 dev = devvp->v_rdev;
510 cred = p ? p->p_ucred : NOCRED;
511 /*
512 * Disallow multiple mounts of the same device.
513 * Disallow mounting of a device that is currently in use
514 * (except for root, which might share swap device for miniroot).
515 * Flush out any old buffers remaining from a previous use.
516 */
517 if (error = vfs_mountedon(devvp))
518 return (error);
519 if (vcount(devvp) > 1 && devvp != rootvp)
520 return (EBUSY);
521 if (error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0))
522 return (error);
523
524 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
525 if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
526 return (error);
527
528 VOP_DEVBLOCKSIZE(devvp,&size);
529
530 bp = NULL;
531 ump = NULL;
532 if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, cred, &bp))
533 goto out;
534 fs = (struct fs *)bp->b_data;
535 #if REV_ENDIAN_FS
536 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
537 fs->fs_bsize < sizeof(struct fs)) {
538 int magic = fs->fs_magic;
539
540 byte_swap_ints(&magic, 1);
541 if (magic != FS_MAGIC) {
542 error = EINVAL;
543 goto out;
544 }
545 byte_swap_sbin(fs);
546 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
547 fs->fs_bsize < sizeof(struct fs)) {
548 byte_swap_sbout(fs);
549 error = EINVAL; /* XXX needs translation */
550 goto out;
551 }
552 rev_endian=1;
553 }
554 #endif /* REV_ENDIAN_FS */
555 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
556 fs->fs_bsize < sizeof(struct fs)) {
557 #if REV_ENDIAN_FS
558 if (rev_endian)
559 byte_swap_sbout(fs);
560 #endif /* REV_ENDIAN_FS */
561 error = EINVAL; /* XXX needs translation */
562 goto out;
563 }
564
565
566 /*
567 * The buffer cache does not handle multiple pages in a buf when
568 * invalidating an incore buffer in pageout. There are no locks
569 * in the pageout path, so there is a danger of losing data when
570 * block allocation happens at the same time as a pageout of the buddy
571 * page. incore() returns a buf covering both pages, which leads
572 * vnode-pageout to incorrectly flush the entire buf. Until the
573 * low-level ffs code is modified to deal with this, do not mount
574 * any file system with a block size larger than 4K.
575 */
576 /*
577 * Can't mount filesystems with a fragment size less than DIRBLKSIZ
578 */
579 /*
580 * Don't mount dirty filesystems, except for the root filesystem
581 */
582 if ((fs->fs_bsize > PAGE_SIZE) || (fs->fs_fsize < DIRBLKSIZ) ||
583 ((!(mp->mnt_flag & MNT_ROOTFS)) && (!fs->fs_clean))) {
584 #if REV_ENDIAN_FS
585 if (rev_endian)
586 byte_swap_sbout(fs);
587 #endif /* REV_ENDIAN_FS */
588 error = ENOTSUP;
589 goto out;
590 }
591
592 /* Figure out the device block size the file system expects: */
593 /* device block size = fragment size / number of sectors per fragment */
594
595 dbsize = fs->fs_fsize / NSPF(fs);
596 if (dbsize <= 0) {
597 kprintf("device blocksize computation failed\n");
598 } else {
599 if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &dbsize, FWRITE, NOCRED,
600 p) != 0) {
601 kprintf("failed to set device blocksize\n");
602 }
603 /* force the specfs to reread blocksize from size() */
604 set_fsblocksize(devvp);
605 }
606
607 /* cache the IO attributes */
608 error = vfs_init_io_attributes(devvp, mp);
609 if (error) {
610 printf("ffs_mountfs: vfs_init_io_attributes returned %d\n",
611 error);
612 goto out;
613 }
614
615 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
616 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
617 #if REV_ENDIAN_FS
618 if (rev_endian)
619 byte_swap_sbout(fs);
620 #endif /* REV_ENDIAN_FS */
621 error = EROFS; /* needs translation */
622 goto out;
623 }
624
625 /* If we are not mounting read only, then check for overlap
626 * condition in cylinder group's free block map.
627 * If overlap exists, then force this into a read only mount
628 * to avoid further corruption. PR#2216969
629 */
630 if (ronly == 0){
631 if (error = bread (devvp, fsbtodb(fs, cgtod(fs, 0)),
632 (int)fs->fs_cgsize, NOCRED, &cgbp)) {
633 brelse(cgbp);
634 goto out;
635 }
636 cgp = (struct cg *)cgbp->b_data;
637 #if REV_ENDIAN_FS
638 if (rev_endian)
639 byte_swap_cgin(cgp,fs);
640 #endif /* REV_ENDIAN_FS */
641 if (!cg_chkmagic(cgp)){
642 #if REV_ENDIAN_FS
643 if (rev_endian)
644 byte_swap_cgout(cgp,fs);
645 #endif /* REV_ENDIAN_FS */
646 brelse(cgbp);
647 goto out;
648 }
649 if (cgp->cg_clustersumoff != 0) {
650 /* Check for overlap */
651 clustersumoff = cgp->cg_freeoff +
652 howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY);
653 clustersumoff = roundup(clustersumoff, sizeof(long));
654 if (cgp->cg_clustersumoff < clustersumoff) {
655 /* Overlap exists */
656 mp->mnt_flag |= MNT_RDONLY;
657 ronly = 1;
658 }
659 }
660 #if REV_ENDIAN_FS
661 if (rev_endian)
662 byte_swap_cgout(cgp,fs);
663 #endif /* REV_ENDIAN_FS */
664 brelse(cgbp);
665 }
666
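/* Superblock checks passed: allocate the in-core ufsmount and a private copy of the superblock. */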
667 ump = _MALLOC(sizeof *ump, M_UFSMNT, M_WAITOK);
668 bzero((caddr_t)ump, sizeof *ump);
669 ump->um_fs = _MALLOC((u_long)fs->fs_sbsize, M_UFSMNT,
670 M_WAITOK);
671 bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
672 if (fs->fs_sbsize < SBSIZE)
673 bp->b_flags |= B_INVAL;
674 #if REV_ENDIAN_FS
675 if (rev_endian)
676 byte_swap_sbout(fs);
677 #endif /* REV_ENDIAN_FS */
678 brelse(bp);
679 bp = NULL;
680 fs = ump->um_fs;
681 fs->fs_ronly = ronly;
682 size = fs->fs_cssize;
683 blks = howmany(size, fs->fs_fsize);
684 if (fs->fs_contigsumsize > 0)
685 size += fs->fs_ncg * sizeof(int32_t);
686 base = space = _MALLOC((u_long)size, M_UFSMNT, M_WAITOK);
687 base = space;
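/* Read the cylinder group summary information into the buffer just allocated. */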
688 for (i = 0; i < blks; i += fs->fs_frag) {
689 size = fs->fs_bsize;
690 if (i + fs->fs_frag > blks)
691 size = (blks - i) * fs->fs_fsize;
692 if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
693 cred, &bp)) {
694 _FREE(base, M_UFSMNT);
695 goto out;
696 }
697 bcopy(bp->b_data, space, (u_int)size);
698 #if REV_ENDIAN_FS
699 if (rev_endian)
700 byte_swap_ints((int *) space, size / sizeof(int));
701 #endif /* REV_ENDIAN_FS */
702 fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
703 space += size;
704 brelse(bp);
705 bp = NULL;
706 }
707 if (fs->fs_contigsumsize > 0) {
708 fs->fs_maxcluster = lp = (int32_t *)space;
709 for (i = 0; i < fs->fs_ncg; i++)
710 *lp++ = fs->fs_contigsumsize;
711 }
712 mp->mnt_data = (qaddr_t)ump;
713 mp->mnt_stat.f_fsid.val[0] = (long)dev;
714 mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
715 #warning hardcoded max symlen and not "mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;"
716 mp->mnt_maxsymlinklen = 60;
717 #if REV_ENDIAN_FS
718 if (rev_endian)
719 mp->mnt_flag |= MNT_REVEND;
720 #endif /* REV_ENDIAN_FS */
721 ump->um_mountp = mp;
722 ump->um_dev = dev;
723 ump->um_devvp = devvp;
724 ump->um_nindir = fs->fs_nindir;
725 ump->um_bptrtodb = fs->fs_fsbtodb;
726 ump->um_seqinc = fs->fs_frag;
727 for (i = 0; i < MAXQUOTAS; i++)
728 ump->um_quotas[i] = NULLVP;
729 devvp->v_specflags |= SI_MOUNTEDON;
730 ffs_oldfscompat(fs);
731 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
732 maxfilesize = (u_int64_t)0x100000000; /* 4GB */
733 #if 0
734 maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */
735 #endif /* 0 */
736 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
737 fs->fs_maxfilesize = maxfilesize; /* XXX */
738 if (ronly == 0) {
739 fs->fs_clean = 0;
740 (void) ffs_sbupdate(ump, MNT_WAIT);
741 }
742 return (0);
743 out:
744 if (bp)
745 brelse(bp);
746 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
747 if (ump) {
748 _FREE(ump->um_fs, M_UFSMNT);
749 _FREE(ump, M_UFSMNT);
750 mp->mnt_data = (qaddr_t)0;
751 }
752 return (error);
753 }
754
755 /*
756 * Sanity checks for old file systems.
757 *
758 * XXX - goes away some day.
759 */
760 ffs_oldfscompat(fs)
761 struct fs *fs;
762 {
763 int i;
764
765 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */
766 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */
767 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
768 fs->fs_nrpos = 8; /* XXX */
769 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
770 u_int64_t sizepb = fs->fs_bsize; /* XXX */
771 /* XXX */
772 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
773 for (i = 0; i < NIADDR; i++) { /* XXX */
774 sizepb *= NINDIR(fs); /* XXX */
775 fs->fs_maxfilesize += sizepb; /* XXX */
776 } /* XXX */
777 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
778 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
779 } /* XXX */
780 return (0);
781 }
782
783 /*
784 * unmount system call
785 */
786 int
787 ffs_unmount(mp, mntflags, p)
788 struct mount *mp;
789 int mntflags;
790 struct proc *p;
791 {
792 register struct ufsmount *ump;
793 register struct fs *fs;
794 int error, flags;
795 flags = 0;
796 if (mntflags & MNT_FORCE)
797 flags |= FORCECLOSE;
798 if (error = ffs_flushfiles(mp, flags, p))
799 return (error);
800 ump = VFSTOUFS(mp);
801 fs = ump->um_fs;
802 if (fs->fs_ronly == 0) {
803 fs->fs_clean = 1;
804 if (error = ffs_sbupdate(ump, MNT_WAIT)) {
805 fs->fs_clean = 0;
806 #ifdef notyet
807 /* We can at least clean up, since the media could be write-protected */
808 /* and mount does not check for write failures. */
809 /* FIXME LATER: the correct fix would be to have mount detect */
810 /* write-protected media and downgrade to a read-only mount. */
811 /* For now, just ignore the error here. */
812 return (error);
813 #endif /* notyet */
814 }
815 }
816 ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
817 error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
818 NOCRED, p);
819 vrele(ump->um_devvp);
820
821 _FREE(fs->fs_csp[0], M_UFSMNT);
822 _FREE(fs, M_UFSMNT);
823 _FREE(ump, M_UFSMNT);
824 mp->mnt_data = (qaddr_t)0;
825 #if REV_ENDIAN_FS
826 mp->mnt_flag &= ~MNT_REVEND;
827 #endif /* REV_ENDIAN_FS */
828 return (error);
829 }
830
831 /*
832 * Flush out all the files in a filesystem.
833 */
834 ffs_flushfiles(mp, flags, p)
835 register struct mount *mp;
836 int flags;
837 struct proc *p;
838 {
839 register struct ufsmount *ump;
840 int i, error;
841
842 ump = VFSTOUFS(mp);
843 #if QUOTA
844 if (mp->mnt_flag & MNT_QUOTA) {
845 if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
846 return (error);
847 for (i = 0; i < MAXQUOTAS; i++) {
848 if (ump->um_quotas[i] == NULLVP)
849 continue;
850 quotaoff(p, mp, i);
851 }
852 /*
853 * Here we fall through to vflush again to ensure
854 * that we have gotten rid of all the system vnodes.
855 */
856 }
857 #endif
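/* Flush in two passes: first skipping swap vnodes, then flushing whatever remains. */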
858 error = vflush(mp, NULLVP, SKIPSWAP|flags);
859 error = vflush(mp, NULLVP, flags);
860 return (error);
861 }
862
863 /*
864 * Get file system statistics.
865 */
866 int
867 ffs_statfs(mp, sbp, p)
868 struct mount *mp;
869 register struct statfs *sbp;
870 struct proc *p;
871 {
872 register struct ufsmount *ump;
873 register struct fs *fs;
874
875 ump = VFSTOUFS(mp);
876 fs = ump->um_fs;
877 if (fs->fs_magic != FS_MAGIC)
878 panic("ffs_statfs");
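/* statfs reports sizes in fragments: f_bsize is the fragment size, f_iosize the preferred (block) I/O size. */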
879 sbp->f_bsize = fs->fs_fsize;
880 sbp->f_iosize = fs->fs_bsize;
881 sbp->f_blocks = fs->fs_dsize;
882 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
883 fs->fs_cstotal.cs_nffree;
884 sbp->f_bavail = freespace(fs, fs->fs_minfree);
885 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
886 sbp->f_ffree = fs->fs_cstotal.cs_nifree;
887 if (sbp != &mp->mnt_stat) {
888 sbp->f_type = mp->mnt_vfc->vfc_typenum;
889 bcopy((caddr_t)mp->mnt_stat.f_mntonname,
890 (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
891 bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
892 (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
893 }
894 return (0);
895 }
896
897 /*
898 * Go through the disk queues to initiate sandbagged IO;
899 * go through the inodes to write those that have been modified;
900 * initiate the writing of the super block if it has been modified.
901 *
902 * Note: we are always called with the filesystem marked `MPBUSY'.
903 */
904 int
905 ffs_sync(mp, waitfor, cred, p)
906 struct mount *mp;
907 int waitfor;
908 struct ucred *cred;
909 struct proc *p;
910 {
911 struct vnode *nvp, *vp;
912 struct inode *ip;
913 struct ufsmount *ump = VFSTOUFS(mp);
914 struct fs *fs;
915 int error, allerror = 0;
916
917 fs = ump->um_fs;
918 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
919 printf("fs = %s\n", fs->fs_fsmnt);
920 panic("update: rofs mod");
921 }
922 /*
923 * Write back each (modified) inode.
924 */
925 simple_lock(&mntvnode_slock);
926 loop:
927 for (vp = mp->mnt_vnodelist.lh_first;
928 vp != NULL;
929 vp = nvp) {
930 int didhold = 0;
931
932 /*
933 * If the vnode that we are about to sync is no longer
934 * associated with this mount point, start over.
935 */
936 if (vp->v_mount != mp)
937 goto loop;
938 simple_lock(&vp->v_interlock);
939 nvp = vp->v_mntvnodes.le_next;
940 ip = VTOI(vp);
941 if ((vp->v_type == VNON) ||
942 ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
943 vp->v_dirtyblkhd.lh_first == NULL && !(vp->v_flag & VHASDIRTY))) {
944 simple_unlock(&vp->v_interlock);
945 continue;
946 }
947 simple_unlock(&mntvnode_slock);
948 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
949 if (error) {
950 simple_lock(&mntvnode_slock);
951 if (error == ENOENT)
952 goto loop;
953 continue;
954 }
955 didhold = ubc_hold(vp);
956 if (error = VOP_FSYNC(vp, cred, waitfor, p))
957 allerror = error;
958 VOP_UNLOCK(vp, 0, p);
959 if (didhold)
960 ubc_rele(vp);
961 vrele(vp);
962 simple_lock(&mntvnode_slock);
963 }
964 simple_unlock(&mntvnode_slock);
965 /*
966 * Force stale file system control information to be flushed.
967 */
968 if (error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p))
969 allerror = error;
970 #if QUOTA
971 qsync(mp);
972 #endif
973 /*
974 * Write back modified superblock.
975 */
976 if (fs->fs_fmod != 0) {
977 fs->fs_fmod = 0;
978 fs->fs_time = time.tv_sec;
979 if (error = ffs_sbupdate(ump, waitfor))
980 allerror = error;
981 }
982 return (allerror);
983 }
984
985 /*
986 * Look up a FFS dinode number to find its incore vnode, otherwise read it
987 * in from disk. If it is in core, wait for the lock bit to clear, then
988 * return the inode locked. Detection and handling of mount points must be
989 * done by the calling routine.
990 */
991 int
992 ffs_vget(mp, ino, vpp)
993 struct mount *mp;
994 ino_t ino;
995 struct vnode **vpp;
996 {
997 struct proc *p = current_proc(); /* XXX */
998 struct fs *fs;
999 struct inode *ip;
1000 struct ufsmount *ump;
1001 struct buf *bp;
1002 struct vnode *vp;
1003 dev_t dev;
1004 int i, type, error;
1005
1006 ump = VFSTOUFS(mp);
1007 dev = ump->um_dev;
1008
1009 /* Check for unmount in progress */
1010 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
1011 *vpp = NULL;
1012 return (EPERM);
1013 }
1014
1015 if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1016 vp = *vpp;
1017 UBCINFOCHECK("ffs_vget", vp);
1018 return (0);
1019 }
1020 /* Allocate a new vnode/inode. */
1021 type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
1022 MALLOC_ZONE(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
1023 bzero((caddr_t)ip, sizeof(struct inode));
1024 lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
1025 /* lock the inode */
1026 lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p);
1027
1028 ip->i_fs = fs = ump->um_fs;
1029 ip->i_dev = dev;
1030 ip->i_number = ino;
1031 ip->i_flag |= IN_ALLOC;
1032 #if QUOTA
1033 for (i = 0; i < MAXQUOTAS; i++)
1034 ip->i_dquot[i] = NODQUOT;
1035 #endif
1036
1037 /*
1038 * MALLOC_ZONE is a blocking call. Check for a race.
1039 */
1040 if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1041 /* Clean up */
1042 FREE_ZONE(ip, sizeof(struct inode), type);
1043 vp = *vpp;
1044 UBCINFOCHECK("ffs_vget", vp);
1045 return (0);
1046 }
1047
1048 /*
1049 * Put it onto its hash chain locked so that other requests for
1050 * this inode will block if they arrive while we are sleeping waiting
1051 * for old data structures to be purged or for the contents of the
1052 * disk portion of this inode to be read.
1053 */
1054 ufs_ihashins(ip);
1055
1056 if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
1057 ufs_ihashrem(ip);
1058 if (ISSET(ip->i_flag, IN_WALLOC))
1059 wakeup(ip);
1060 FREE_ZONE(ip, sizeof(struct inode), type);
1061 *vpp = NULL;
1062 return (error);
1063 }
1064 vp->v_data = ip;
1065 ip->i_vnode = vp;
1066
1067 /*
1068 * A vnode is associated with the inode now,
1069 * vget() can deal with the serialization.
1070 */
1071 CLR(ip->i_flag, IN_ALLOC);
1072 if (ISSET(ip->i_flag, IN_WALLOC))
1073 wakeup(ip);
1074
1075 /* Read in the disk contents for the inode, copy into the inode. */
1076 if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1077 (int)fs->fs_bsize, NOCRED, &bp)) {
1078 /*
1079 * The inode does not contain anything useful, so it would
1080 * be misleading to leave it on its hash chain. With mode
1081 * still zero, it will be unlinked and returned to the free
1082 * list by vput().
1083 */
1084 vput(vp);
1085 brelse(bp);
1086 *vpp = NULL;
1087 return (error);
1088 }
1089 #if REV_ENDIAN_FS
1090 if (mp->mnt_flag & MNT_REVEND) {
1091 byte_swap_inode_in(((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)),ip);
1092 } else {
1093 #endif /* REV_ENDIAN_FS */
1094 ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
1095 #if REV_ENDIAN_FS
1096 }
1097 #endif /* REV_ENDIAN_FS */
1098 brelse(bp);
1099
1100 /*
1101 * Initialize the vnode from the inode, check for aliases.
1102 * Note that the underlying vnode may have changed.
1103 */
1104 if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
1105 vput(vp);
1106 *vpp = NULL;
1107 return (error);
1108 }
1109 /*
1110 * Finish inode initialization now that aliasing has been resolved.
1111 */
1112 ip->i_devvp = ump->um_devvp;
1113 VREF(ip->i_devvp);
1114 /*
1115 * Set up a generation number for this inode if it does not
1116 * already have one. This should only happen on old filesystems.
1117 */
1118 if (ip->i_gen == 0) {
1119 if (++nextgennumber < (u_long)time.tv_sec)
1120 nextgennumber = time.tv_sec;
1121 ip->i_gen = nextgennumber;
1122 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1123 ip->i_flag |= IN_MODIFIED;
1124 }
1125 /*
1126 * Ensure that uid and gid are correct. This is a temporary
1127 * fix until fsck has been changed to do the update.
1128 */
1129 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1130 ip->i_uid = ip->i_din.di_ouid; /* XXX */
1131 ip->i_gid = ip->i_din.di_ogid; /* XXX */
1132 } /* XXX */
1133
1134 *vpp = vp;
1135 if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp))
1136 ubc_info_init(vp);
1137 return (0);
1138 }
1139
1140 /*
1141 * File handle to vnode
1142 *
1143 * Have to be really careful about stale file handles:
1144 * - check that the inode number is valid
1145 * - call ffs_vget() to get the locked inode
1146 * - check for an unallocated inode (i_mode == 0)
1147 * - check that the given client host has export rights and return
1148 * those rights via exflagsp and credanonp
1149 */
1150 int
1151 ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
1152 register struct mount *mp;
1153 struct fid *fhp;
1154 struct mbuf *nam;
1155 struct vnode **vpp;
1156 int *exflagsp;
1157 struct ucred **credanonp;
1158 {
1159 register struct ufid *ufhp;
1160 struct fs *fs;
1161
1162 ufhp = (struct ufid *)fhp;
1163 fs = VFSTOUFS(mp)->um_fs;
1164 if (ufhp->ufid_ino < ROOTINO ||
1165 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1166 return (ESTALE);
1167 return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
1168 }
1169
1170 /*
1171 * Vnode pointer to File handle
1172 */
1173 /* ARGSUSED */
1174 ffs_vptofh(vp, fhp)
1175 struct vnode *vp;
1176 struct fid *fhp;
1177 {
1178 register struct inode *ip;
1179 register struct ufid *ufhp;
1180
1181 ip = VTOI(vp);
1182 ufhp = (struct ufid *)fhp;
1183 ufhp->ufid_len = sizeof(struct ufid);
1184 ufhp->ufid_ino = ip->i_number;
1185 ufhp->ufid_gen = ip->i_gen;
1186 return (0);
1187 }
1188
1189 /*
1190 * Initialize the filesystem; just use ufs_init.
1191 */
1192 int
1193 ffs_init(vfsp)
1194 struct vfsconf *vfsp;
1195 {
1196
1197 return (ufs_init(vfsp));
1198 }
1199
1200 /*
1201 * fast filesystem related variables.
1202 */
1203 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1204 int *name;
1205 u_int namelen;
1206 void *oldp;
1207 size_t *oldlenp;
1208 void *newp;
1209 size_t newlen;
1210 struct proc *p;
1211 {
1212 extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
1213
1214 /* all sysctl names at this level are terminal */
1215 if (namelen != 1)
1216 return (ENOTDIR); /* overloaded */
1217
1218 switch (name[0]) {
1219 case FFS_CLUSTERREAD:
1220 return (sysctl_int(oldp, oldlenp, newp, newlen,
1221 &doclusterread));
1222 case FFS_CLUSTERWRITE:
1223 return (sysctl_int(oldp, oldlenp, newp, newlen,
1224 &doclusterwrite));
1225 case FFS_REALLOCBLKS:
1226 return (sysctl_int(oldp, oldlenp, newp, newlen,
1227 &doreallocblks));
1228 case FFS_ASYNCFREE:
1229 return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
1230 default:
1231 return (EOPNOTSUPP);
1232 }
1233 /* NOTREACHED */
1234 }
1235
1236 /*
1237 * Write a superblock and associated information back to disk.
1238 */
1239 int
1240 ffs_sbupdate(mp, waitfor)
1241 struct ufsmount *mp;
1242 int waitfor;
1243 {
1244 register struct fs *dfs, *fs = mp->um_fs;
1245 register struct buf *bp;
1246 int blks;
1247 caddr_t space;
1248 int i, size, error, allerror = 0;
1249 int devBlockSize=0;
1250 #if REV_ENDIAN_FS
1251 int rev_endian=(mp->um_mountp->mnt_flag & MNT_REVEND);
1252 #endif /* REV_ENDIAN_FS */
1253
1254 /*
1255 * First write back the summary information.
1256 */
1257 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1258 space = (caddr_t)fs->fs_csp[0];
1259 for (i = 0; i < blks; i += fs->fs_frag) {
1260 size = fs->fs_bsize;
1261 if (i + fs->fs_frag > blks)
1262 size = (blks - i) * fs->fs_fsize;
1263 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1264 size, 0, 0, BLK_META);
1265 bcopy(space, bp->b_data, (u_int)size);
1266 #if REV_ENDIAN_FS
1267 if (rev_endian) {
1268 byte_swap_ints((int *)bp->b_data, size / sizeof(int));
1269 }
1270 #endif /* REV_ENDIAN_FS */
1271 space += size;
1272 if (waitfor != MNT_WAIT)
1273 bawrite(bp);
1274 else if (error = bwrite(bp))
1275 allerror = error;
1276 }
1277 /*
1278 * Now write back the superblock itself. If any errors occurred
1279 * up to this point, then fail so that the superblock avoids
1280 * being written out as clean.
1281 */
1282 if (allerror)
1283 return (allerror);
1284 VOP_DEVBLOCKSIZE(mp->um_devvp,&devBlockSize);
1285 bp = getblk(mp->um_devvp, (SBOFF/devBlockSize), (int)fs->fs_sbsize, 0, 0, BLK_META);
1286 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1287 /* Restore compatibility to old file systems. XXX */
1288 dfs = (struct fs *)bp->b_data; /* XXX */
1289 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
1290 dfs->fs_nrpos = -1; /* XXX */
1291 #if REV_ENDIAN_FS
1292 /*
1293 * Swapping bytes here ; so that in case
1294 * of inode format < FS_44INODEFMT appropriate
1295 * fields get moved
1296 */
1297 if (rev_endian) {
1298 byte_swap_sbout((struct fs *)bp->b_data);
1299 }
1300 #endif /* REV_ENDIAN_FS */
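/* Pre-4.4 inode format compatibility: rotate the five 32-bit words starting at fs_qbmask by one position to restore the old field layout. */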
1301 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1302 int32_t *lp, tmp; /* XXX */
1303 /* XXX */
1304 lp = (int32_t *)&dfs->fs_qbmask; /* XXX */
1305 tmp = lp[4]; /* XXX */
1306 for (i = 4; i > 0; i--) /* XXX */
1307 lp[i] = lp[i-1]; /* XXX */
1308 lp[0] = tmp; /* XXX */
1309 } /* XXX */
1310 #if REV_ENDIAN_FS
1311 /* Note that dfs is already byte-swapped, so swap the
1312 * max file size before writing it out
1313 */
1314 if (rev_endian) {
1315 dfs->fs_maxfilesize = NXSwapLongLong(mp->um_savedmaxfilesize); /* XXX */
1316 } else {
1317 #endif /* REV_ENDIAN_FS */
1318 dfs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
1319 #if REV_ENDIAN_FS
1320 }
1321 #endif /* REV_ENDIAN_FS */
1322 if (waitfor != MNT_WAIT)
1323 bawrite(bp);
1324 else if (error = bwrite(bp))
1325 allerror = error;
1326
1327 return (allerror);
1328 }