/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */
#include <rev_endian_fs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/vnode_internal.h>
#include <sys/socket.h>
#include <sys/mount_internal.h>
#include <sys/mount.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/quota.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <ufs/ufs/ufs_byte_order.h>
#include <architecture/byte_order.h>
#endif /* REV_ENDIAN_FS */
int ffs_sbupdate(struct ufsmount *, int);

struct vfsops ufs_vfsops = {

extern u_long nextgennumber;

#define SETHIGH(q, h) { \
    tmp.val[_QUAD_HIGHWORD] = (h); \

#define SETLOW(q, l) { \
    tmp.val[_QUAD_LOWWORD] = (l); \
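/*
 * Note: SETHIGH()/SETLOW() appear to store a value into the high or low
 * 32-bit word of a 64-bit quad through a temporary union; they are used in
 * ffs_vget_internal() below to seed an inode's i_modrev from the current
 * time.
 */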
/*
 * Called by main() when ufs is going to be mounted as root.
 */
ffs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context)
{
    struct proc *p = current_proc();	/* XXX */

    /* Set asynchronous flag by default */
    vfs_setflags(mp, MNT_ASYNC);

    if (error = ffs_mountfs(rvp, mp, context))

    (void)ffs_statfs(mp, vfs_statfs(mp), NULL);
ffs_mount(struct mount *mp, vnode_t devvp, __unused user_addr_t data, vfs_context_t context)
{
    struct proc *p = vfs_context_proc(context);
    struct ufsmount *ump;
    register struct fs *fs;
    int error = 0, flags;

    /*
     * If updating, check whether changing from read-write to
     * read-only; if there is no device name, that's all we do.
     */
    if (mp->mnt_flag & MNT_UPDATE) {
        if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
            /*
             * Flush any dirty data.
             */
            VFS_SYNC(mp, MNT_WAIT, context);
            /*
             * Check for and optionally get rid of files open
             * for writing.
             */
            if (mp->mnt_flag & MNT_FORCE)
            if (error = ffs_flushfiles(mp, flags, p))
            if (error = ffs_sbupdate(ump, MNT_WAIT)) {
        /* save fs_ronly to later use */
        ronly = fs->fs_ronly;
        if ((mp->mnt_flag & MNT_RELOAD) || ronly)
            (error = ffs_reload(mp, vfs_context_ucred(context), p)))
        /* replace the ronly after load */
        fs->fs_ronly = ronly;
        /*
         * Do not update the file system if the user was in singleuser
         * and then tries to mount -uw without fscking
         */
        if (!fs->fs_clean && ronly) {
            printf("WARNING: trying to mount a dirty file system\n");
            if (issingleuser() && (mp->mnt_flag & MNT_ROOTFS)) {
                printf("WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n", fs->fs_fsmnt);
        /*
         * Reset the readonly bit as reload might have
         * changed it.
         */
        if (ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
            (void) ffs_sbupdate(ump, MNT_WAIT);
    if ((mp->mnt_flag & MNT_UPDATE) == 0)
        error = ffs_mountfs(devvp, mp, context);
    if (devvp != ump->um_devvp)
        error = EINVAL;	/* needs translation */
    bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
    strncpy(fs->fs_fsmnt, (caddr_t)mp->mnt_vfsstat.f_mntonname, sizeof(fs->fs_fsmnt) - 1);
    (void)ffs_statfs(mp, &mp->mnt_vfsstat, p);
struct ffs_reload_cargs {
#endif /* REV_ENDIAN_FS */

ffs_reload_callback(struct vnode *vp, void *cargs)
{
    struct ffs_reload_cargs *args;

    args = (struct ffs_reload_cargs *)cargs;

    /*
     * flush all the buffers associated with this node
     */
    if (buf_invalidateblks(vp, 0, 0, 0))
        panic("ffs_reload: dirty2");
    /*
     * Step 6: re-read inode data
     */
    if (args->error = (int)buf_bread(args->devvp, (daddr64_t)((unsigned)fsbtodb(fs, ino_to_fsba(fs, ip->i_number))),
        (int)fs->fs_bsize, NOCRED, &bp)) {

        return (VNODE_RETURNED_DONE);

    if (args->rev_endian) {
        byte_swap_inode_in(((struct dinode *)buf_dataptr(bp) +
            ino_to_fsbo(fs, ip->i_number)), ip);
#endif /* REV_ENDIAN_FS */
    ip->i_din = *((struct dinode *)buf_dataptr(bp) +
        ino_to_fsbo(fs, ip->i_number));
#endif /* REV_ENDIAN_FS */

    return (VNODE_RETURNED);
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
ffs_reload(struct mount *mountp, kauth_cred_t cred, struct proc *p)
{
    register struct vnode *devvp;
    struct fs *fs, *newfs;
    int i, blks, size, error;
    u_int64_t maxfilesize;			/* XXX */
    struct ffs_reload_cargs args;
    int rev_endian = (mountp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

    if ((mountp->mnt_flag & MNT_RDONLY) == 0)
    /*
     * Step 1: invalidate all cached meta-data.
     */
    devvp = VFSTOUFS(mountp)->um_devvp;
    if (buf_invalidateblks(devvp, 0, 0, 0))
        panic("ffs_reload: dirty1");
    /*
     * Step 2: re-read superblock from disk.
     */
    size = vfs_devblocksize(mountp);

    if (error = (int)buf_bread(devvp, (daddr64_t)((unsigned)(SBOFF/size)), SBSIZE, NOCRED, &bp)) {

    newfs = (struct fs *)buf_dataptr(bp);
        byte_swap_sbin(newfs);
#endif /* REV_ENDIAN_FS */
    if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
        newfs->fs_bsize < sizeof(struct fs)) {
            byte_swap_sbout(newfs);
#endif /* REV_ENDIAN_FS */
        return (EIO);		/* XXX needs translation */
    fs = VFSTOUFS(mountp)->um_fs;
    /*
     * Copy pointer fields back into superblock before copying in	XXX
     * new superblock. These should really be in the ufsmount.	XXX
     * Note that important parameters (eg fs_ncg) are unchanged.
     */
    newfs->fs_csp = fs->fs_csp;
    newfs->fs_maxcluster = fs->fs_maxcluster;
    newfs->fs_contigdirs = fs->fs_contigdirs;
    bcopy(newfs, fs, (u_int)fs->fs_sbsize);
    if (fs->fs_sbsize < SBSIZE)
        byte_swap_sbout(newfs);
#endif /* REV_ENDIAN_FS */

    mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;

    maxfilesize = 0x100000000ULL;    /* 4GB */
    if (fs->fs_maxfilesize > maxfilesize)		/* XXX */
        fs->fs_maxfilesize = maxfilesize;		/* XXX */
    /*
     * Step 3: re-read summary information from disk.
     */
    blks = howmany(fs->fs_cssize, fs->fs_fsize);
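    /*
     * Note: fs_cssize is the size in bytes of the cylinder-group summary
     * area, so howmany(fs_cssize, fs_fsize) gives the number of fragments
     * it occupies; the loop below re-reads that area in chunks of fs_frag
     * fragments (one full block) at a time.
     */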
    for (i = 0; i < blks; i += fs->fs_frag) {
        if (i + fs->fs_frag > blks)
            size = (blks - i) * fs->fs_fsize;
        if (error = (int)buf_bread(devvp, (daddr64_t)((unsigned)fsbtodb(fs, fs->fs_csaddr + i)), size,
            byte_swap_ints((int *)buf_dataptr(bp), size / sizeof(int));
#endif /* REV_ENDIAN_FS */
        bcopy((char *)buf_dataptr(bp), space, (u_int)size);
            byte_swap_ints((int *)buf_dataptr(bp), size / sizeof(int));
#endif /* REV_ENDIAN_FS */
        space = (char *) space + size;
    /*
     * We no longer know anything about clusters per cylinder group.
     */
    if (fs->fs_contigsumsize > 0) {
        lp = fs->fs_maxcluster;
        for (i = 0; i < fs->fs_ncg; i++)
            *lp++ = fs->fs_contigsumsize;
    args.rev_endian = rev_endian;
#endif /* REV_ENDIAN_FS */
    /*
     * ffs_reload_callback will be called for each vnode
     * hung off of this mount point that can't be recycled...
     * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
     * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
     * properly referenced and unreferenced around the callback
     */
    vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, ffs_reload_callback, (void *)&args);
/*
 * Common code for mount and mountroot
 */
ffs_mountfs(devvp, mp, context)
	vfs_context_t context;
{
    struct ufsmount *ump;
    int32_t clustersumoff;
    int error, i, blks, ronly;
    u_int64_t maxfilesize;			/* XXX */
    u_int dbsize = DEV_BSIZE;
#endif /* REV_ENDIAN_FS */

    cred = vfs_context_ucred(context);

    ronly = vfs_isrdonly(mp);

    /* Advisory locking should be handled at the VFS layer */
    vfs_setlocklocal(mp);

    /* Obtain the actual device block size */
    if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&size, 0, context)) {

    if (error = (int)buf_bread(devvp, (daddr64_t)((unsigned)(SBOFF/size)),

    fs = (struct fs *)buf_dataptr(bp);

    if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
        fs->fs_bsize < sizeof(struct fs)) {
        int magic = fs->fs_magic;

        byte_swap_ints(&magic, 1);
        if (magic != FS_MAGIC) {

        if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
            fs->fs_bsize < sizeof(struct fs)) {

            error = EINVAL;		/* XXX needs translation */
#endif /* REV_ENDIAN_FS */
    if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
        fs->fs_bsize < sizeof(struct fs)) {
#endif /* REV_ENDIAN_FS */
        error = EINVAL;		/* XXX needs translation */
    /*
     * Buffer cache does not handle multiple pages in a buf when
     * invalidating incore buffers in pageout. There are no locks
     * in the pageout path, so there is a danger of losing data when
     * block allocation happens at the same time as a pageout of a
     * buddy page. incore() returns a buf with both pages, and this
     * leads vnode-pageout to incorrectly flush the entire buf. Until
     * the low-level ffs code is modified to deal with this, do not
     * mount any FS with a block size larger than 4K.
     *
     * Can't mount filesystems with a fragment size less than DIRBLKSIZ.
     *
     * Don't mount dirty filesystems, except for the root filesystem.
     */
    if ((fs->fs_bsize > PAGE_SIZE) || (fs->fs_fsize < DIRBLKSIZ) ||
        ((!(mp->mnt_flag & MNT_ROOTFS)) && (!fs->fs_clean))) {
#endif /* REV_ENDIAN_FS */
    /* Figure out the device block size the file system was built with: */
    /* the device block size = fragment size / number of sectors per frag */

    dbsize = fs->fs_fsize / NSPF(fs);
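    /*
     * Note: NSPF(fs) is the number of DEV_BSIZE sectors per fragment, so
     * fs_fsize / NSPF(fs) recovers the sector size the file system was
     * laid out for (dbsize was defaulted to DEV_BSIZE above).
     */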
        kprintf("device blocksize computation failed\n");

    if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&dbsize,
        FWRITE, context) != 0) {
        kprintf("failed to set device blocksize\n");

    /* force the specfs to reread blocksize from size() */
    set_fsblocksize(devvp);

    /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
    if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
#endif /* REV_ENDIAN_FS */
        error = EROFS;		/* needs translation */
    /* If we are not mounting read only, then check for overlap
     * condition in cylinder group's free block map.
     * If overlap exists, then force this into a read only mount
     * to avoid further corruption. PR#2216969
     */
    if (error = (int)buf_bread(devvp, (daddr64_t)((unsigned)fsbtodb(fs, cgtod(fs, 0))),
        (int)fs->fs_cgsize, NOCRED, &cgbp)) {

    cgp = (struct cg *)buf_dataptr(cgbp);
        byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
    if (!cg_chkmagic(cgp)) {
            byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */

    if (cgp->cg_clustersumoff != 0) {
        /* Check for overlap */
        clustersumoff = cgp->cg_freeoff +
            howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY);
        clustersumoff = roundup(clustersumoff, sizeof(long));
        if (cgp->cg_clustersumoff < clustersumoff) {
            mp->mnt_flag |= MNT_RDONLY;
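            /*
             * Note: the free-block bitmap needs one bit per fragment in
             * the cylinder group (fs_cpg * fs_spc / NSPF(fs) of them), so
             * the cluster summary should start no earlier than cg_freeoff
             * plus that bitmap, rounded up to a long boundary; a smaller
             * cg_clustersumoff means the maps overlap, and the mount is
             * forced read-only.
             */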
            byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
    ump = _MALLOC(sizeof *ump, M_UFSMNT, M_WAITOK);
    bzero((caddr_t)ump, sizeof *ump);
    ump->um_fs = _MALLOC((u_long)fs->fs_sbsize, M_UFSMNT,
    bcopy((char *)buf_dataptr(bp), ump->um_fs, (u_int)fs->fs_sbsize);
    if (fs->fs_sbsize < SBSIZE)
#endif /* REV_ENDIAN_FS */
    fs->fs_ronly = ronly;
    size = fs->fs_cssize;
    blks = howmany(size, fs->fs_fsize);
    if (fs->fs_contigsumsize > 0)
        size += fs->fs_ncg * sizeof(int32_t);
    size += fs->fs_ncg * sizeof(u_int8_t);
    space = _MALLOC((u_long)size, M_UFSMNT, M_WAITOK);

    for (i = 0; i < blks; i += fs->fs_frag) {
        if (i + fs->fs_frag > blks)
            size = (blks - i) * fs->fs_fsize;
        if (error = (int)buf_bread(devvp, (daddr64_t)((unsigned)fsbtodb(fs, fs->fs_csaddr + i)),
            _FREE(fs->fs_csp, M_UFSMNT);
        bcopy((char *)buf_dataptr(bp), space, (u_int)size);
            byte_swap_ints((int *) space, size / sizeof(int));
#endif /* REV_ENDIAN_FS */
        space = (char *)space + size;

    if (fs->fs_contigsumsize > 0) {
        fs->fs_maxcluster = lp = space;
        for (i = 0; i < fs->fs_ncg; i++)
            *lp++ = fs->fs_contigsumsize;

    size = fs->fs_ncg * sizeof(u_int8_t);
    fs->fs_contigdirs = (u_int8_t *)space;
    space = (u_int8_t *)space + size;
    bzero(fs->fs_contigdirs, size);
    /* XXX Compatibility for old filesystems */
    if (fs->fs_avgfilesize <= 0)
        fs->fs_avgfilesize = AVFILESIZ;
    if (fs->fs_avgfpdir <= 0)
        fs->fs_avgfpdir = AFPDIR;
    /* XXX End of compatibility */
    mp->mnt_data = (qaddr_t)ump;
    mp->mnt_vfsstat.f_fsid.val[0] = (long)dev;
    mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp);
    /* XXX warning hardcoded max symlen and not "mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;" */
    mp->mnt_maxsymlinklen = 60;
        mp->mnt_flag |= MNT_REVEND;
#endif /* REV_ENDIAN_FS */
    ump->um_devvp = devvp;
    ump->um_nindir = fs->fs_nindir;
    ump->um_bptrtodb = fs->fs_fsbtodb;
    ump->um_seqinc = fs->fs_frag;
    for (i = 0; i < MAXQUOTAS; i++)
        dqfileinit(&ump->um_qfiles[i]);

    ump->um_savedmaxfilesize = fs->fs_maxfilesize;	/* XXX */
    maxfilesize = 0x100000000ULL;    /* 4GB */
        maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
    if (fs->fs_maxfilesize > maxfilesize)		/* XXX */
        fs->fs_maxfilesize = maxfilesize;		/* XXX */
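    /*
     * Note: the in-core fs_maxfilesize is clamped to the limit computed
     * above (4GB, or (2^30 * fs_bsize) - 1 in the alternate branch), while
     * the on-disk value was saved in um_savedmaxfilesize so ffs_sbupdate()
     * can write the original back out with the superblock.
     */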
    (void) ffs_sbupdate(ump, MNT_WAIT);

    _FREE(ump->um_fs, M_UFSMNT);
    _FREE(ump, M_UFSMNT);
/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
    fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
    fs->fs_interleave = max(fs->fs_interleave, 1);	/* XXX */
    if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
        fs->fs_nrpos = 8;				/* XXX */
    if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
        u_int64_t sizepb = fs->fs_bsize;		/* XXX */

        fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
        for (i = 0; i < NIADDR; i++) {			/* XXX */
            sizepb *= NINDIR(fs);			/* XXX */
            fs->fs_maxfilesize += sizepb;		/* XXX */
        }
        fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
        fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
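        /*
         * Note: fs_bmask/fs_fmask mask a file offset down to a block or
         * fragment boundary, so their complements give the offset within a
         * block or fragment; pre-4.4 superblocks did not store these derived
         * fields, which is why they are recomputed here. The maxfilesize loop
         * above likewise sums the direct blocks plus each level of
         * indirection.
         */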
/*
 * unmount system call
 */
ffs_unmount(mp, mntflags, context)
	vfs_context_t context;
{
    struct proc *p = vfs_context_proc(context);
    register struct ufsmount *ump;
    register struct fs *fs;

    if (mntflags & MNT_FORCE) {
    if ( (error = ffs_flushfiles(mp, flags, p)) && !force )
    if (fs->fs_ronly == 0) {
        if (error = ffs_sbupdate(ump, MNT_WAIT)) {
            /* We can at least clean up, since the media could be write-protected */
            /* and during mount we do not check for write failures. */
            /* FIXME LATER: the correct fix would be to have */
            /* mount detect the WP media and downgrade to a read-only mount. */
            /* For now, here it is. */
    _FREE(fs->fs_csp, M_UFSMNT);
    _FREE(ump, M_UFSMNT);
/*
 * Flush out all the files in a filesystem.
 */
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
{
    register struct ufsmount *ump;

    /*
     * NOTE: The open quota files have an indirect reference
     * on the root directory vnode. We must account for this
     * extra reference when doing the initial vflush.
     */
    if (mp->mnt_flag & MNT_QUOTA) {
        struct vnode *rootvp = NULLVP;
        int quotafilecnt = 0;

        /* Find out how many quota files we have open. */
        for (i = 0; i < MAXQUOTAS; i++) {
            if (ump->um_qfiles[i].qf_vp != NULLVP)
        /*
         * Check if the root vnode is in our inode hash
         * (so we can skip over it).
         */
        rootvp = ufs_ihashget(ump->um_dev, ROOTINO);

        error = vflush(mp, rootvp, SKIPSYSTEM|flags);

        /*
         * See if there are additional references on the
         * root vp besides the ones obtained from the open
         * quota files and the ufs_ihashget call above.
         */
            (rootvp->v_usecount > (1 + quotafilecnt))) {
            error = EBUSY;	/* root dir is still open */

    if (error && (flags & FORCECLOSE) == 0)

    for (i = 0; i < MAXQUOTAS; i++) {
        if (ump->um_qfiles[i].qf_vp == NULLVP)
    /*
     * Here we fall through to vflush again to ensure
     * that we have gotten rid of all the system vnodes.
     */
    error = vflush(mp, NULLVP, SKIPSWAP|flags);
    error = vflush(mp, NULLVP, flags);
/*
 * Get file system statistics.
 */
ffs_statfs(mp, sbp, context)
	register struct vfsstatfs *sbp;
	vfs_context_t context;
{
    register struct ufsmount *ump;
    register struct fs *fs;

    if (fs->fs_magic != FS_MAGIC)

    sbp->f_bsize = fs->fs_fsize;
    sbp->f_iosize = fs->fs_bsize;
    sbp->f_blocks = (uint64_t)((unsigned long)fs->fs_dsize);
    sbp->f_bfree = (uint64_t) ((unsigned long)(fs->fs_cstotal.cs_nbfree * fs->fs_frag +
        fs->fs_cstotal.cs_nffree));
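    /*
     * Note: cs_nbfree counts whole free blocks, so it is multiplied by
     * fs_frag to convert it to fragments before adding cs_nffree, the
     * count of free fragments; the block counts reported here are in
     * fragment units, matching the f_bsize of fs_fsize set above.
     */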
    sbp->f_bavail = (uint64_t) ((unsigned long)freespace(fs, fs->fs_minfree));
    sbp->f_files = (uint64_t) ((unsigned long)(fs->fs_ncg * fs->fs_ipg - ROOTINO));
    sbp->f_ffree = (uint64_t) ((unsigned long)fs->fs_cstotal.cs_nifree);
ffs_vfs_getattr(mp, fsap, context)
	struct vfs_attr *fsap;
	vfs_context_t context;
{
    struct ufsmount *ump;
    struct ufslabel *ulp;
    int bs, error, length;

    cred = vfs_context_ucred(context);

    VFSATTR_RETURN(fsap, f_bsize, fs->fs_fsize);
    VFSATTR_RETURN(fsap, f_iosize, fs->fs_bsize);
    VFSATTR_RETURN(fsap, f_blocks, (uint64_t)((unsigned long)fs->fs_dsize));
    VFSATTR_RETURN(fsap, f_bfree, (uint64_t)((unsigned long)
        (fs->fs_cstotal.cs_nbfree * fs->fs_frag +
        fs->fs_cstotal.cs_nffree)));
    VFSATTR_RETURN(fsap, f_bavail, (uint64_t)((unsigned long)freespace(fs,
    VFSATTR_RETURN(fsap, f_files, (uint64_t)((unsigned long)
        (fs->fs_ncg * fs->fs_ipg - ROOTINO)));
    VFSATTR_RETURN(fsap, f_ffree, (uint64_t)((unsigned long)
        fs->fs_cstotal.cs_nifree));

    if (VFSATTR_IS_ACTIVE(fsap, f_fsid)) {
        fsap->f_fsid.val[0] = mp->mnt_vfsstat.f_fsid.val[0];
        fsap->f_fsid.val[1] = mp->mnt_vfsstat.f_fsid.val[1];
        VFSATTR_SET_SUPPORTED(fsap, f_fsid);

    if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
        devvp = ump->um_devvp;
        bs = vfs_devblocksize(mp);

        if (error = (int)buf_meta_bread(devvp,
            (daddr64_t)(UFS_LABEL_OFFSET / bs),
            MAX(bs, UFS_LABEL_SIZE), cred, &bp)) {

        /*
         * Since the disklabel is read directly by older user space
         * code, make sure this buffer won't remain in the cache when
         * we release it.
         */
        buf_setflags(bp, B_NOCACHE);

        offset = buf_dataptr(bp) + (UFS_LABEL_OFFSET % bs);
        ulp = (struct ufslabel *)offset;

        if (ufs_label_check(ulp)) {
            length = ulp->ul_namelen;
            if (mp->mnt_flag & MNT_REVEND)
                length = NXSwapShort(length);

            if (length > 0 && length <= UFS_MAX_LABEL_NAME) {
                bcopy(ulp->ul_name, fsap->f_vol_name, length);
                fsap->f_vol_name[UFS_MAX_LABEL_NAME - 1] = '\0';
                fsap->f_vol_name[length] = '\0';

        VFSATTR_SET_SUPPORTED(fsap, f_vol_name);

    if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
        fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] =
            VOL_CAP_FMT_SYMBOLICLINKS |
            VOL_CAP_FMT_HARDLINKS |
            VOL_CAP_FMT_SPARSE_FILES |
            VOL_CAP_FMT_CASE_SENSITIVE |
            VOL_CAP_FMT_CASE_PRESERVING |
            VOL_CAP_FMT_FAST_STATFS;
        fsap->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES]
            = VOL_CAP_INT_NFSEXPORT |
            VOL_CAP_INT_VOL_RENAME |
            VOL_CAP_INT_ADVLOCK |
        fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1]
        fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2]

        /* Capabilities we know about: */
        fsap->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] =
            VOL_CAP_FMT_PERSISTENTOBJECTIDS |
            VOL_CAP_FMT_SYMBOLICLINKS |
            VOL_CAP_FMT_HARDLINKS |
            VOL_CAP_FMT_JOURNAL |
            VOL_CAP_FMT_JOURNAL_ACTIVE |
            VOL_CAP_FMT_NO_ROOT_TIMES |
            VOL_CAP_FMT_SPARSE_FILES |
            VOL_CAP_FMT_ZERO_RUNS |
            VOL_CAP_FMT_CASE_SENSITIVE |
            VOL_CAP_FMT_CASE_PRESERVING |
            VOL_CAP_FMT_FAST_STATFS |
            VOL_CAP_FMT_2TB_FILESIZE;
        fsap->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] =
            VOL_CAP_INT_SEARCHFS |
            VOL_CAP_INT_ATTRLIST |
            VOL_CAP_INT_NFSEXPORT |
            VOL_CAP_INT_READDIRATTR |
            VOL_CAP_INT_EXCHANGEDATA |
            VOL_CAP_INT_COPYFILE |
            VOL_CAP_INT_ALLOCATE |
            VOL_CAP_INT_VOL_RENAME |
            VOL_CAP_INT_ADVLOCK |
        fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0;
        fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0;

        VFSATTR_SET_SUPPORTED(fsap, f_capabilities);

    if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
        fsap->f_attributes.validattr.commonattr = 0;
        fsap->f_attributes.validattr.volattr =
            ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
        fsap->f_attributes.validattr.dirattr = 0;
        fsap->f_attributes.validattr.fileattr = 0;
        fsap->f_attributes.validattr.forkattr = 0;

        fsap->f_attributes.nativeattr.commonattr = 0;
        fsap->f_attributes.nativeattr.volattr =
            ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
        fsap->f_attributes.nativeattr.dirattr = 0;
        fsap->f_attributes.nativeattr.fileattr = 0;
        fsap->f_attributes.nativeattr.forkattr = 0;

        VFSATTR_SET_SUPPORTED(fsap, f_attributes);
ffs_vfs_setattr(mp, fsap, context)
	struct vfs_attr *fsap;
	vfs_context_t context;
{
    struct ufsmount *ump;
    struct vnode *devvp;
    struct ufslabel *ulp;

    cred = vfs_context_ucred(context);

    if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
        devvp = ump->um_devvp;
        bs = vfs_devblocksize(mp);
        if (error = buf_meta_bread(devvp,
            (daddr64_t)(UFS_LABEL_OFFSET / bs),
            MAX(bs, UFS_LABEL_SIZE), cred, &bp)) {

        /*
         * Since the disklabel is read directly by older user space
         * code, make sure this buffer won't remain in the cache when
         * we release it.
         */
        buf_setflags(bp, B_NOCACHE);

        /* Validate the label structure; init if not valid */
        offset = buf_dataptr(bp) + (UFS_LABEL_OFFSET % bs);
        ulp = (struct ufslabel *)offset;
        if (!ufs_label_check(ulp))
            ufs_label_init(ulp);

        /* Copy new name over existing name */
        ulp->ul_namelen = strlen(fsap->f_vol_name);
        if (mp->mnt_flag & MNT_REVEND)
            ulp->ul_namelen = NXSwapShort(ulp->ul_namelen);
        bcopy(fsap->f_vol_name, ulp->ul_name, ulp->ul_namelen);
        ulp->ul_name[UFS_MAX_LABEL_NAME - 1] = '\0';
        ulp->ul_name[ulp->ul_namelen] = '\0';

        /* Update the checksum */
        ulp->ul_checksum = 0;
        ulp->ul_checksum = ul_cksum(ulp, sizeof(*ulp));
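        /*
         * Note: ul_checksum is zeroed before ul_cksum() runs so that the
         * checksum is computed over the label with the checksum field
         * treated as zero; verification can then recompute it the same way.
         */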
        /* Write the label back to disk */

        VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
struct ffs_sync_cargs {
	vfs_context_t context;

ffs_sync_callback(struct vnode *vp, void *cargs)
{
    struct ffs_sync_cargs *args;

    args = (struct ffs_sync_cargs *)cargs;

    if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) || vnode_hasdirtyblks(vp)) {
        error = VNOP_FSYNC(vp, args->waitfor, args->context);

            args->error = error;

    return (VNODE_RETURNED);
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
ffs_sync(mp, waitfor, context)
	vfs_context_t context;
{
    struct vnode *nvp, *vp;
    struct ufsmount *ump = VFSTOUFS(mp);
    int error, allerror = 0;
    struct ffs_sync_cargs args;

    if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {	/* XXX */
        printf("fs = %s\n", fs->fs_fsmnt);
        panic("update: rofs mod");
    /*
     * Write back each (modified) inode.
     */
    args.context = context;
    args.waitfor = waitfor;
    /*
     * ffs_sync_callback will be called for each vnode
     * hung off of this mount point... the vnode will be
     * properly referenced and unreferenced around the callback
     */
    vnode_iterate(mp, 0, ffs_sync_callback, (void *)&args);

    allerror = args.error;
    /*
     * Force stale file system control information to be flushed.
     */
    if (error = VNOP_FSYNC(ump->um_devvp, waitfor, context))
    /*
     * Write back modified superblock.
     */
    if (fs->fs_fmod != 0) {
        fs->fs_time = tv.tv_sec;
        if (error = ffs_sbupdate(ump, waitfor))
/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
ffs_vget(mp, ino, vpp, context)
	vfs_context_t context;
{
    return(ffs_vget_internal(mp, (ino_t)ino, vpp, NULL, NULL, 0, 0));
ffs_vget_internal(mp, ino, vpp, dvp, cnp, mode, fhwanted)
	struct componentname *cnp;
{
    struct proc *p = current_proc();	/* XXX */
    struct ufsmount *ump;
    struct vnode_fsparam vfsp;
    int i, type, error = 0;

    /* Check for unmount in progress */
    if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
    /*
     * Allocate a new inode... do it before we check the
     * cache, because the MALLOC_ZONE may block
     */
    MALLOC_ZONE(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
    /*
     * check in the inode hash
     */
    if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
        /*
         * found it... get rid of the allocation
         * that we didn't need and return
         */
        FREE_ZONE(ip, sizeof(struct inode), type);

    bzero((caddr_t)ip, sizeof(struct inode));
    // lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
    // lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p);
    ip->i_fs = fs = ump->um_fs;
    for (i = 0; i < MAXQUOTAS; i++)
        ip->i_dquot[i] = NODQUOT;

    SET(ip->i_flag, IN_ALLOC);
    /*
     * Put it onto its hash chain locked so that other requests for
     * this inode will block if they arrive while we are sleeping waiting
     * for old data structures to be purged or for the contents of the
     * disk portion of this inode to be read.
     */
    /* Read in the disk contents for the inode, copy into the inode. */
    if (error = (int)buf_bread(ump->um_devvp, (daddr64_t)((unsigned)fsbtodb(fs, ino_to_fsba(fs, ino))),
        (int)fs->fs_bsize, NOCRED, &bp)) {

    if (mp->mnt_flag & MNT_REVEND) {
        byte_swap_inode_in(((struct dinode *)buf_dataptr(bp) + ino_to_fsbo(fs, ino)), ip);
        ip->i_din = *((struct dinode *)buf_dataptr(bp) + ino_to_fsbo(fs, ino));
    ip->i_din = *((struct dinode *)buf_dataptr(bp) + ino_to_fsbo(fs, ino));
#endif /* REV_ENDIAN_FS */

    vtype = IFTOVT(ip->i_mode);
    vtype = IFTOVT(mode);
    if (vtype == VNON) {
        /* NFS is in play */

    vfsp.vnfs_vtype = vtype;
    vfsp.vnfs_str = "ufs";
    vfsp.vnfs_dvp = dvp;
    vfsp.vnfs_fsnode = ip;
    vfsp.vnfs_cnp = cnp;
    vfsp.vnfs_filesize = ip->i_din.di_size;
    vfsp.vnfs_filesize = 0;

    if (vtype == VFIFO)
        vfsp.vnfs_vops = FFS_FIFOOPS;
    else if (vtype == VBLK || vtype == VCHR)
        vfsp.vnfs_vops = ffs_specop_p;
        vfsp.vnfs_vops = ffs_vnodeop_p;

    if (vtype == VBLK || vtype == VCHR)
        vfsp.vnfs_rdev = ip->i_rdev;

    if (dvp && cnp && (cnp->cn_flags & MAKEENTRY))
        vfsp.vnfs_flags = 0;
        vfsp.vnfs_flags = VNFS_NOCACHE;
    /*
     * Tag root directory
     */
    vfsp.vnfs_markroot = (ip->i_number == ROOTINO);
    vfsp.vnfs_marksystem = 0;

    if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp)))
    /*
     * Finish inode initialization now that aliasing has been resolved.
     */
    ip->i_devvp = ump->um_devvp;
    vnode_ref(ip->i_devvp);
    vnode_settag(vp, VT_UFS);
    /*
     * Initialize modrev times
     */
    SETHIGH(ip->i_modrev, tv.tv_sec);
    SETLOW(ip->i_modrev, tv.tv_usec * 4294);
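    /*
     * Note: i_modrev packs the current time into a 64-bit value: the
     * seconds go in the high word and the microseconds, scaled by 4294
     * (roughly 2^32 / 1,000,000), fill the low word.
     */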
    /*
     * Set up a generation number for this inode if it does not
     * already have one. This should only happen on old filesystems.
     */
    if (ip->i_gen == 0) {
        if (++nextgennumber < (u_long)tv.tv_sec)
            nextgennumber = tv.tv_sec;
        ip->i_gen = nextgennumber;
        if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
            ip->i_flag |= IN_MODIFIED;
    /*
     * Ensure that uid and gid are correct. This is a temporary
     * fix until fsck has been changed to do the update.
     */
    if (fs->fs_inodefmt < FS_44INODEFMT) {	/* XXX */
        ip->i_uid = ip->i_din.di_ouid;		/* XXX */
        ip->i_gid = ip->i_din.di_ogid;		/* XXX */

    CLR(ip->i_flag, IN_ALLOC);

    if (ISSET(ip->i_flag, IN_WALLOC))

    if (ISSET(ip->i_flag, IN_WALLOC))

    FREE_ZONE(ip, sizeof(struct inode), type);
/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call vget to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 */
ffs_fhtovp(mp, fhlen, fhp, vpp, context)
	register struct mount *mp;
	vfs_context_t context;
{
    register struct ufid *ufhp;
    register struct inode *ip;

    if (fhlen < (int)sizeof(struct ufid))

    ufhp = (struct ufid *)fhp;
    fs = VFSTOUFS(mp)->um_fs;
    if (ufhp->ufid_ino < ROOTINO ||
        ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)

    error = ffs_vget_internal(mp, ufhp->ufid_ino, &nvp, NULL, NULL, 0, 1);

    if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen) {
/*
 * Vnode pointer to File handle
 */
ffs_vptofh(vp, fhlenp, fhp, context)
	vfs_context_t context;
{
    register struct inode *ip;
    register struct ufid *ufhp;

    if (*fhlenp < (int)sizeof(struct ufid))

    ufhp = (struct ufid *)fhp;
    ufhp->ufid_ino = ip->i_number;
    ufhp->ufid_gen = ip->i_gen;
    *fhlenp = sizeof(struct ufid);
/*
 * Initialize the filesystem; just use ufs_init.
 */
	struct vfsconf *vfsp;

    return (ufs_init(vfsp));
/*
 * fast filesystem related variables.
 */
ffs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, vfs_context_t context)
{
    extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;

    /* all sysctl names at this level are terminal */
        return (ENOTDIR);		/* overloaded */

    case FFS_CLUSTERREAD:
        return (sysctl_int(oldp, oldlenp, newp, newlen,
    case FFS_CLUSTERWRITE:
        return (sysctl_int(oldp, oldlenp, newp, newlen,
    case FFS_REALLOCBLKS:
        return (sysctl_int(oldp, oldlenp, newp, newlen,
        return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
/*
 * Write a superblock and associated information back to disk.
 */
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
{
    register struct fs *dfs, *fs = mp->um_fs;
    register struct buf *bp;
    int i, size, error, allerror = 0;
    int rev_endian = (mp->um_mountp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

    /*
     * First write back the summary information.
     */
    blks = howmany(fs->fs_cssize, fs->fs_fsize);

    for (i = 0; i < blks; i += fs->fs_frag) {
        size = fs->fs_bsize;
        if (i + fs->fs_frag > blks)
            size = (blks - i) * fs->fs_fsize;
        bp = buf_getblk(mp->um_devvp, (daddr64_t)((unsigned)fsbtodb(fs, fs->fs_csaddr + i)),
            size, 0, 0, BLK_META);
        bcopy(space, (char *)buf_dataptr(bp), (u_int)size);
            byte_swap_ints((int *)buf_dataptr(bp), size / sizeof(int));
#endif /* REV_ENDIAN_FS */
        space = (char *)space + size;
        if (waitfor != MNT_WAIT)
        else if (error = (int)buf_bwrite(bp))
    /*
     * Now write back the superblock itself. If any errors occurred
     * up to this point, then fail so that the superblock avoids
     * being written out as clean.
     */
    devBlockSize = vfs_devblocksize(mp->um_mountp);

    bp = buf_getblk(mp->um_devvp, (daddr64_t)((unsigned)(SBOFF/devBlockSize)), (int)fs->fs_sbsize, 0, 0, BLK_META);
    bcopy((caddr_t)fs, (char *)buf_dataptr(bp), (u_int)fs->fs_sbsize);
    /* Restore compatibility to old file systems.	   XXX */
    dfs = (struct fs *)buf_dataptr(bp);			/* XXX */
    if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
        dfs->fs_nrpos = -1;				/* XXX */
    /*
     * Swapping bytes here; so that in case
     * of inode format < FS_44INODEFMT appropriate
     */
        byte_swap_sbout((struct fs *)buf_dataptr(bp));
#endif /* REV_ENDIAN_FS */
    if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
        int32_t *lp, tmp;				/* XXX */

        lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
        tmp = lp[4];					/* XXX */
        for (i = 4; i > 0; i--)				/* XXX */
            lp[i] = lp[i-1];				/* XXX */
        lp[0] = tmp;					/* XXX */
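        /*
         * Note: this appears to rotate the five 32-bit words starting at
         * fs_qbmask down by one slot so that the buffer copy (dfs) matches
         * the field layout pre-FS_44INODEFMT superblocks expect; the in-core
         * superblock itself is left untouched.
         */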
    /* Note that dfs is already swapped so swap the filesize */
    dfs->fs_maxfilesize = NXSwapLongLong(mp->um_savedmaxfilesize);	/* XXX */
#endif /* REV_ENDIAN_FS */
    dfs->fs_maxfilesize = mp->um_savedmaxfilesize;	/* XXX */
#endif /* REV_ENDIAN_FS */
    if (waitfor != MNT_WAIT)
    else if (error = (int)buf_bwrite(bp))