 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
 */

/*
 * Block/character interface to a vnode.  Allows one to treat a file
 * as a disk (e.g. build a filesystem in it, mount it, etc.).
 *
 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
 * instead of a simple VOP_RDWR.  We do this to avoid distorting the
 * local buffer cache.
 *
 * NOTE 2: There is a security issue involved with this driver.
 * Once mounted, all access to the contents of the "mapped" file via
 * the special file is controlled by the permissions on the special
 * file; the protection of the mapped file is ignored (effectively,
 * by using root credentials in all transactions).
 *
 * NOTE 3: Doesn't interact with leases, should it?
 */
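
/*
 * Illustrative usage (a sketch only, not part of this driver): a
 * root-owned userspace tool would typically attach a file image to a
 * vn unit with the VNIOCATTACH ioctl, assumed here to be declared in
 * <sys/vnioctl.h> along with struct vn_ioctl.  vn_size is left zero on
 * the assumption that an unspecified size falls back to the file's own
 * size (see vniocattach_file below).  The device node and image path
 * are hypothetical examples; error handling is mostly omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/vnioctl.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	attach_image(const char *image_path)
 *	{
 *		struct vn_ioctl vio;
 *		int fd, error;
 *
 *		memset(&vio, 0, sizeof(vio));
 *		vio.vn_file = (char *)image_path;
 *		vio.vn_size = 0;
 *		fd = open("/dev/vn0", O_RDONLY);
 *		if (fd < 0)
 *			return (-1);
 *		error = ioctl(fd, VNIOCATTACH, &vio);
 *		close(fd);
 *		return (error);
 *	}
 */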

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>

#include <sys/vnioctl.h>

#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>

#include <mach/memory_object_types.h>

#include <miscfs/devfs/devfs.h>

static ioctl_fcn_t		vnioctl_chr;
static ioctl_fcn_t		vnioctl_blk;
static open_close_fcn_t		vnopen;
static open_close_fcn_t		vnclose;
static psize_fcn_t		vnsize;
static strategy_fcn_t		vnstrategy;
static read_write_fcn_t		vnread;
static read_write_fcn_t		vnwrite;

static int	vndevice_bdev_major;
static int	vndevice_cdev_major;

/*
 * D_DISK	we want to look like a disk
 * D_CANFREE	We support B_FREEBUF
 */
static struct bdevsw vn_bdevsw = {
	/* strategy */	vnstrategy,
	/* ioctl */	vnioctl_blk,
};

static struct cdevsw vn_cdevsw = {
	/* ioctl */	vnioctl_chr,
	/* reset */	eno_reset,
	/* select */	eno_select,
	/* strategy */	eno_strat,
};

struct vn_softc {
	u_int64_t	sc_fsize;	/* file size in bytes */
	u_int64_t	sc_size;	/* size of vn, sc_secsize scale */
	int		sc_flags;	/* flags */
	int		sc_secsize;	/* sector size */
	struct vnode	*sc_vp;		/* vnode if not NULL */
	struct vnode	*sc_shadow_vp;	/* shadow vnode if not NULL */
	shadow_map_t	*sc_shadow_map;	/* shadow map if not NULL */
	struct ucred	*sc_cred;	/* credentials */
	u_long		sc_options;	/* options */
} vn_table[NVNDEVICE];

#define ROOT_IMAGE_UNIT	0

#define VNF_INITED	0x01
#define VNF_READONLY	0x02

static u_long	vn_options;

#define IFOPT(vn,opt)	if (((vn)->sc_options|vn_options) & (opt))
#define TESTOPT(vn,opt)	(((vn)->sc_options|vn_options) & (opt))

static int	vnsetcred(struct vn_softc *vn, struct proc *p);
static void	vnclear(struct vn_softc *vn);
static int	vniocattach_file(struct vn_softc *vn, struct vn_ioctl *vio,
				 dev_t dev, int in_kernel, struct proc *p);
static int	vniocattach_shadow(struct vn_softc *vn, struct vn_ioctl *vio,
				   dev_t dev, int in_kernel, struct proc *p);

static int
vnclose(dev_t dev, int flags, int devtype, struct proc *p)

static int
vnopen(dev_t dev, int flags, int devtype, struct proc *p)
{
	if (vnunit(dev) >= NVNDEVICE) {

	vn = vn_table + unit;
	if ((flags & FWRITE) && (vn->sc_flags & VNF_READONLY))

static int
vnread(dev_t dev, struct uio *uio, int ioflag)
{
	struct proc *		p = current_proc();
	struct vn_softc *	vn;

	if (vnunit(dev) >= NVNDEVICE) {

	vn = vn_table + unit;
	if ((vn->sc_flags & VNF_INITED) == 0) {

	if (vn->sc_shadow_vp != NULL) {

	vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	status = VOP_READ(vn->sc_vp, uio, ioflag, vn->sc_cred);
	VOP_UNLOCK(vn->sc_vp, 0, p);

static int
vnwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct proc *		p = current_proc();
	struct vn_softc *	vn;

	if (vnunit(dev) >= NVNDEVICE) {

	vn = vn_table + unit;
	if ((vn->sc_flags & VNF_INITED) == 0) {

	if (vn->sc_shadow_vp != NULL) {

	if (vn->sc_flags & VNF_READONLY) {

	vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	status = VOP_WRITE(vn->sc_vp, uio, ioflag, vn->sc_cred);
	VOP_UNLOCK(vn->sc_vp, 0, p);

static boolean_t
bp_is_mapped(struct buf * bp, vm_offset_t * vaddr)
{
	boolean_t	is_mapped = FALSE;

	if (bp->b_flags & B_NEED_IODONE) {
		struct buf * real_bp = (struct buf *)bp->b_real_bp;

		if (real_bp && real_bp->b_data) {
			*vaddr = (vm_offset_t)real_bp->b_data;

static __inline__ int
file_io(struct vnode * vp, struct ucred * cred,
	enum uio_rw op, char * base, off_t offset, long count,
	struct proc * p, long * resid)
{
	bzero(&auio, sizeof(auio));
	aiov.iov_base = base;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_offset = offset;
	auio.uio_resid = count;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (op == UIO_READ)
		error = VOP_READ(vp, &auio, IO_SYNC, cred);
	else
		error = VOP_WRITE(vp, &auio, IO_SYNC, cred);
	VOP_UNLOCK(vp, 0, p);
	*resid = auio.uio_resid;

static int
shadow_read(struct vn_softc * vn, struct buf * bp, char * base, struct proc * p)
{
	boolean_t	read_shadow;

	offset = bp->b_blkno;
	resid = bp->b_bcount / vn->sc_secsize;

	read_shadow = shadow_map_read(vn->sc_shadow_map,
				      &this_offset, &this_resid);

	vp = vn->sc_shadow_vp;

	error = file_io(vp, vn->sc_cred, UIO_READ, base + start,
			(off_t)this_offset * vn->sc_secsize,
			this_resid * vn->sc_secsize, p, &temp_resid);

	temp_resid = this_resid - temp_resid / vn->sc_secsize;
	if (temp_resid == 0) {
		static int printed = 0;
		printf("vn device: shadow_read zero length read (printed %d)\n", printed);

	offset += temp_resid;
	start += temp_resid * vn->sc_secsize;

	bp->b_resid = resid * vn->sc_secsize;

static int
shadow_write(struct vn_softc * vn, struct buf * bp, char * base,
	     struct proc * p)
{
	boolean_t	shadow_grew;

	offset = bp->b_blkno;
	resid = bp->b_bcount / vn->sc_secsize;

	shadow_grew = shadow_map_write(vn->sc_shadow_map,
				       &this_offset, &this_resid);

	/* truncate the file to its new length before write */
	size = (off_t)shadow_map_shadow_size(vn->sc_shadow_map)
		* vn->sc_secsize;
	vn_lock(vn->sc_shadow_vp, LK_EXCLUSIVE | LK_RETRY, p);
	VOP_TRUNCATE(vn->sc_shadow_vp, size,
		     IO_SYNC, vn->sc_cred, p);
	VOP_UNLOCK(vn->sc_shadow_vp, 0, p);

	error = file_io(vn->sc_shadow_vp, vn->sc_cred, UIO_WRITE,
			base + start,
			(off_t)this_offset * vn->sc_secsize,
			this_resid * vn->sc_secsize, p, &temp_resid);

	temp_resid = this_resid - temp_resid / vn->sc_secsize;
	if (temp_resid == 0) {
		static int printed = 0;
		printf("vn device: shadow_write zero length write (printed %d)\n", printed);

	offset += temp_resid;
	start += temp_resid * vn->sc_secsize;

	bp->b_resid = resid * vn->sc_secsize;

static int
vn_readwrite_io(struct vn_softc * vn, struct buf * bp)
{
	boolean_t	need_unmap = FALSE;
	struct proc *	p = current_proc();
	vm_offset_t	vaddr = NULL;

	if (bp->b_flags & B_VECTORLIST) {
		if (bp_is_mapped(bp, &vaddr) == FALSE) {
			if (ubc_upl_map(bp->b_pagelist, &vaddr)
			    != KERN_SUCCESS)
				panic("vn device: ubc_upl_map failed");

		iov_base = (caddr_t)(vaddr + bp->b_uploffset);

		iov_base = bp->b_data;

	if (vn->sc_shadow_vp == NULL) {
		error = file_io(vn->sc_vp, vn->sc_cred,
				bp->b_flags & B_READ ? UIO_READ : UIO_WRITE,
				iov_base, (off_t)bp->b_blkno * vn->sc_secsize,
				bp->b_bcount, p, &bp->b_resid);

		if (bp->b_flags & B_READ)
			error = shadow_read(vn, bp, iov_base, p);
		else
			error = shadow_write(vn, bp, iov_base, p);

	ubc_upl_unmap(bp->b_pagelist);

static void
vnstrategy(struct buf *bp)
{
	long sz;	/* in sc_secsize chunks */

	vn = vn_table + vnunit(bp->b_dev);
	if ((vn->sc_flags & VNF_INITED) == 0) {
		bp->b_flags |= B_ERROR;

	bp->b_resid = bp->b_bcount;

	/*
	 * Check for required alignment.  Transfers must be a valid
	 * multiple of the sector size.
	 */
	if (bp->b_bcount % vn->sc_secsize != 0 ||
	    bp->b_blkno % (vn->sc_secsize / DEV_BSIZE) != 0) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;

	sz = howmany(bp->b_bcount, vn->sc_secsize);

	/*
	 * If out of bounds return an error.  If at the EOF point,
	 * simply read or write less.
	 */
	if (bp->b_blkno >= vn->sc_size) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;

	/*
	 * If the request crosses EOF, truncate the request.
	 */
	if ((bp->b_blkno + sz) > vn->sc_size) {
		bp->b_bcount = (vn->sc_size - bp->b_blkno) * vn->sc_secsize;
		bp->b_resid = bp->b_bcount;

	error = vn_readwrite_io(vn, bp);

	bp->b_flags |= B_ERROR;

	bp->b_flags |= B_ERROR;
	bp->b_error = EINVAL;

static int
vnioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p,

	struct vn_ioctl *vio;

	if (vnunit(dev) >= NVNDEVICE) {

	vn = vn_table + unit;
	error = suser(p->p_ucred, &p->p_acflag);

	vio = (struct vn_ioctl *)data;
	o = (u_int64_t *)data;

	case DKIOCGETMAXBLOCKCOUNTREAD:
	case DKIOCGETMAXBLOCKCOUNTWRITE:
	case DKIOCGETMAXSEGMENTCOUNTREAD:
	case DKIOCGETMAXSEGMENTCOUNTWRITE:
	case DKIOCGETBLOCKCOUNT32:
		if ((vn->sc_flags & VNF_INITED) == 0) {

	case DKIOCGETMAXBLOCKCOUNTREAD:
		*o = vn->sc_vp->v_mount->mnt_maxreadcnt / vn->sc_secsize;

	case DKIOCGETMAXBLOCKCOUNTWRITE:
		*o = vn->sc_vp->v_mount->mnt_maxwritecnt / vn->sc_secsize;

	case DKIOCGETMAXSEGMENTCOUNTREAD:
		*o = vn->sc_vp->v_mount->mnt_segreadcnt;

	case DKIOCGETMAXSEGMENTCOUNTWRITE:
		*o = vn->sc_vp->v_mount->mnt_segwritecnt;

	case DKIOCGETBLOCKSIZE:

	case DKIOCSETBLOCKSIZE:
		/* can only set block size on block device */

		if (vn->sc_shadow_vp != NULL) {
			/* can't set the block size if already shadowing */

		if (*f < DEV_BSIZE) {

		/* recompute the size in terms of the new blocksize */
		vn->sc_size = vn->sc_fsize / vn->sc_secsize;

	case DKIOCISWRITABLE:

	case DKIOCGETBLOCKCOUNT32:

	case DKIOCGETBLOCKCOUNT64:

		if (vn->sc_shadow_vp != NULL) {

		if (vn->sc_vp == NULL) {
			/* must be attached before we can shadow */

		if (vio->vn_file == NULL) {

		error = vniocattach_shadow(vn, vio, dev, 0, p);

		/* attach only on block device */

		if (vn->sc_flags & VNF_INITED) {

		if (vio->vn_file == NULL) {

		error = vniocattach_file(vn, vio, dev, 0, p);

		/* detach only on block device */

		/* Note: spec_open won't open a mounted block device */

		/*
		 * XXX handle i/o in progress.  Return EBUSY, or wait, or
		 * XXX handle multiple opens of the device.  Return EBUSY,
		 * or revoke the fd's.
		 * How are these problems handled for removable and failing
		 * hardware devices? (Hint: They are not)
		 */

		vn->sc_options |= *f;

		vn->sc_options &= ~(*f);

static int
vnioctl_chr(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	return (vnioctl(dev, cmd, data, flag, p, TRUE));
}

static int
vnioctl_blk(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	return (vnioctl(dev, cmd, data, flag, p, FALSE));
}

/*
 * Attach a file to a VN partition.  Return the size in the vn_size
 * field.
 */
static int
vniocattach_file(struct vn_softc *vn,
		 struct vn_ioctl *vio,
		 dev_t dev, int in_kernel,
		 struct proc *p)
{
	flags = FREAD|FWRITE;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, vio->vn_file, p);

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vn_file, p);

	error = vn_open(&nd, flags, 0);

	if (error != EACCES && error != EPERM && error != EROFS)

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE,

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,

	error = vn_open(&nd, flags, 0);

	if (nd.ni_vp->v_type != VREG) {

	else if (ubc_isinuse(nd.ni_vp, 1)) {

	error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);

	VOP_UNLOCK(nd.ni_vp, 0, p);
	(void) vn_close(nd.ni_vp, flags, p->p_ucred, p);

	vn->sc_vp = nd.ni_vp;
	vn->sc_vp->v_flag |= VNOCACHE_DATA;
	VOP_UNLOCK(nd.ni_vp, 0, p);

	vn->sc_open_flags = flags;

	/*
	 * If the size is specified, override the file attributes.  Note that
	 * the vn_size argument is in PAGE_SIZE sized blocks.
	 */
	vn->sc_size = (quad_t)vio->vn_size * PAGE_SIZE / vn->sc_secsize;

	vn->sc_size = vattr.va_size / vn->sc_secsize;

	vn->sc_secsize = DEV_BSIZE;
	vn->sc_fsize = vattr.va_size;
	vn->sc_size = vattr.va_size / vn->sc_secsize;
	error = vnsetcred(vn, p);

	(void) vn_close(nd.ni_vp, flags, p->p_ucred, p);

	dev_t cdev = makedev(vndevice_cdev_major,
	vn->sc_cdev = devfs_make_node(cdev, DEVFS_CHAR,
				      UID_ROOT, GID_OPERATOR,

	vn->sc_flags |= VNF_INITED;

	vn->sc_flags |= VNF_READONLY;

static int
vniocattach_shadow(vn, vio, dev, in_kernel, p)
	struct vn_ioctl *vio;
{
	flags = FREAD|FWRITE;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, vio->vn_file, p);

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vn_file, p);

	error = vn_open(&nd, flags, 0);

	/* shadow MUST be writable! */

	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p))) {
		VOP_UNLOCK(nd.ni_vp, 0, p);
		(void) vn_close(nd.ni_vp, flags, p->p_ucred, p);
		return (error ? error : EINVAL);
	}
	vn->sc_shadow_vp = nd.ni_vp;
	vn->sc_shadow_vp->v_flag |= VNOCACHE_DATA;
	VOP_UNLOCK(nd.ni_vp, 0, p);

	map = shadow_map_create(vn->sc_fsize, vattr.va_size,

	(void) vn_close(nd.ni_vp, flags, p->p_ucred, p);
	vn->sc_shadow_vp = NULL;

	vn->sc_shadow_map = map;
	vn->sc_flags &= ~VNF_READONLY;	/* we're now read/write */

int
vndevice_root_image(char * path, char devname[], dev_t * dev_p)
{
	struct vn_softc *	vn;

	vn = vn_table + ROOT_IMAGE_UNIT;
	*dev_p = makedev(vndevice_bdev_major,
			 ROOT_IMAGE_UNIT);
	sprintf(devname, "vn%d", ROOT_IMAGE_UNIT);
	error = vniocattach_file(vn, &vio, *dev_p, 1, current_proc());

/*
 * Duplicate the current process's credentials.  Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root.  Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 */
static int
vnsetcred(struct vn_softc *vn, struct proc * p)
{
	struct proc * current_proc();
	struct ucred * cred = p->p_ucred;

	/*
	 * Set credentials in our softc.
	 */
	vn->sc_cred = crdup(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	tmpbuf = _MALLOC(vn->sc_secsize, M_TEMP, M_WAITOK);
	bzero(&auio, sizeof(auio));

	aiov.iov_base = tmpbuf;
	aiov.iov_len = vn->sc_secsize;
	auio.uio_iov = &aiov;

	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_READ(vn->sc_vp, &auio, 0, vn->sc_cred);
	VOP_UNLOCK(vn->sc_vp, 0, p);
	FREE(tmpbuf, M_TEMP);

static void
vnclear(struct vn_softc *vn)
{
	struct proc * p = current_proc();	/* XXX */

	if (vn->sc_vp != NULL) {
		(void)vn_close(vn->sc_vp, vn->sc_open_flags, vn->sc_cred, p);

	if (vn->sc_shadow_vp != NULL) {
		(void)vn_close(vn->sc_shadow_vp, FREAD | FWRITE,
			       vn->sc_cred, p);
		vn->sc_shadow_vp = NULL;
	}
	if (vn->sc_shadow_map != NULL) {
		shadow_map_free(vn->sc_shadow_map);
		vn->sc_shadow_map = NULL;
	}
	vn->sc_flags &= ~(VNF_INITED | VNF_READONLY);

	devfs_remove(vn->sc_cdev);

static int
vnsize(dev_t dev)
{
	if (vnunit(dev) >= NVNDEVICE) {

	vn = vn_table + unit;

	if ((vn->sc_flags & VNF_INITED) == 0)

	return(vn->sc_secsize);

#define CDEV_MAJOR	-1
#define BDEV_MAJOR	-1
static int vndevice_inited = 0;

void
vndevice_init(void)
{
	vndevice_bdev_major = bdevsw_add(BDEV_MAJOR, &vn_bdevsw);

	if (vndevice_bdev_major < 0) {
		printf("vndevice_init: bdevsw_add() returned %d\n",
		       vndevice_bdev_major);

	vndevice_cdev_major = cdevsw_add_with_bdev(CDEV_MAJOR, &vn_cdevsw,
						   vndevice_bdev_major);
	if (vndevice_cdev_major < 0) {
		printf("vndevice_init: cdevsw_add() returned %d\n",
		       vndevice_cdev_major);

	for (i = 0; i < NVNDEVICE; i++) {
		dev_t dev = makedev(vndevice_bdev_major, i);
		vn_table[i].sc_bdev = devfs_make_node(dev, DEVFS_BLOCK,
						      UID_ROOT, GID_OPERATOR,

		if (vn_table[i].sc_bdev == NULL)
			printf("vninit: devfs_make_node failed!\n");