3 * Copyright (c) 1988 University of Utah.
4 * Copyright (c) 1990, 1993
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the University of
22 * California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * from: Utah Hdr: vn.c 1.13 94/04/02
41 * from: @(#)vn.c 8.6 (Berkeley) 4/1/94
42 * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
48 * Block/character interface to a vnode. Allows one to treat a file
49 * as a disk (e.g. build a filesystem in it, mount it, etc.).
51 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
52 * instead of a simple VOP_RDWR. We do this to avoid distorting the
55 * NOTE 2: There is a security issue involved with this driver.
56 * Once mounted all access to the contents of the "mapped" file via
57 * the special file is controlled by the permissions on the special
58 * file, the protection of the mapped file is ignored (effectively,
59 * by using root credentials in all transactions).
61 * NOTE 3: Doesn't interact with leases, should it?
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/kernel.h>
71 #include <sys/mount.h>
72 #include <sys/namei.h>
75 #include <sys/malloc.h>
76 #include <sys/vnode.h>
77 #include <sys/fcntl.h>
83 #include <sys/vnioctl.h>
87 #include <vm/vm_pager.h>
88 #include <vm/vm_pageout.h>
89 #include <mach/memory_object_types.h>
91 #include <miscfs/devfs/devfs.h>
94 vfs_io_maxsegsize(struct vnode
*vp
,
95 int flags
, /* B_READ or B_WRITE */
99 vfs_io_attributes(struct vnode
*vp
,
100 int flags
, /* B_READ or B_WRITE */
/*
 * Entry points for the vn pseudo-device, wired into vn_bdevsw /
 * vn_cdevsw below.  The *_fcn_t typedefs come from the device-switch
 * framework headers.
 */
106 static ioctl_fcn_t vnioctl_chr;	/* ioctl via the character device */
107 static ioctl_fcn_t vnioctl_blk;	/* ioctl via the block device */
108 static open_close_fcn_t vnopen;
109 static open_close_fcn_t vnclose;
110 static psize_fcn_t vnsize;		/* reports the unit's sector size */
111 static strategy_fcn_t vnstrategy;	/* block-I/O entry point */
112 static read_write_fcn_t vnread;
113 static read_write_fcn_t vnwrite;

/* major numbers handed back by bdevsw_add()/cdevsw_add_with_bdev() in init */
115 static int vndevice_bdev_major;
116 static int vndevice_cdev_major;
120 * D_DISK we want to look like a disk
121 * D_CANFREE We support B_FREEBUF
124 static struct bdevsw vn_bdevsw
= {
127 /* strategy */ vnstrategy
,
128 /* ioctl */ vnioctl_blk
,
134 static struct cdevsw vn_cdevsw
= {
139 /* ioctl */ vnioctl_chr
,
141 /* reset */ eno_reset
,
143 /* select */ eno_select
,
145 /* strategy */ eno_strat
,
152 u_int64_t sc_fsize;		/* size of the backing file, in bytes */
153 u_int64_t sc_size;		/* size of the vn device, in sc_secsize units */
154 int sc_flags;		/* VNF_* state bits */
155 int sc_secsize;	/* sector size in bytes (DEV_BSIZE on attach) */
156 struct vnode *sc_vp;	/* backing vnode if not NULL */
158 struct vnode *sc_shadow_vp;	/* shadow vnode if not NULL; writes are diverted here (see shadow_write) */
159 shadow_map_t * sc_shadow_map;	/* shadow block map if not NULL */
160 struct ucred *sc_cred;	/* credentials used for all backing-file I/O */
161 u_long sc_options;	/* per-unit options, OR'd with global vn_options */
164 } vn_table[NVNDEVICE];	/* one softc per vn unit, indexed by vnunit(dev) */
166 #define ROOT_IMAGE_UNIT 0	/* vn unit used by vndevice_root_image() */

/* sc_flags bits */
169 #define VNF_INITED 0x01	/* unit has a file attached and is usable */
170 #define VNF_READONLY 0x02	/* writes refused (checked in vnopen/vnwrite) */

/* global options; each macro below ORs these with the unit's sc_options */
172 static u_long vn_options;

/*
 * NOTE(review): IFOPT expands to a bare `if`, so an `else` after a use
 * of IFOPT binds to this hidden `if`.  Kept as-is; callers must brace.
 */
174 #define IFOPT(vn,opt) if (((vn)->sc_options|vn_options) & (opt))
175 #define TESTOPT(vn,opt) (((vn)->sc_options|vn_options) & (opt))
/*
 * vnsetcred: duplicate p's credentials into vn->sc_cred for later file I/O.
 * vnclear: close the backing/shadow vnodes and reset the unit's state.
 */
177 static int vnsetcred (struct vn_softc *vn, struct proc *p);
178 static void vnclear (struct vn_softc *vn);
181 vniocattach_file(struct vn_softc
*vn
,
182 struct vn_ioctl
*vio
,
187 vniocattach_shadow(struct vn_softc
* vn
,
188 struct vn_ioctl
*vio
,
199 vnclose(dev_t dev
, int flags
, int devtype
, struct proc
*p
)
205 vnopen(dev_t dev
, int flags
, int devtype
, struct proc
*p
)
211 if (vnunit(dev
) >= NVNDEVICE
) {
214 vn
= vn_table
+ unit
;
215 if ((flags
& FWRITE
) && (vn
->sc_flags
& VNF_READONLY
))
222 vnread(dev_t dev
, struct uio
*uio
, int ioflag
)
224 struct proc
* p
= current_proc();
226 struct vn_softc
* vn
;
230 if (vnunit(dev
) >= NVNDEVICE
) {
233 vn
= vn_table
+ unit
;
234 if ((vn
->sc_flags
& VNF_INITED
) == 0) {
237 if (vn
->sc_shadow_vp
!= NULL
) {
240 vn_lock(vn
->sc_vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
241 status
= VOP_READ(vn
->sc_vp
, uio
, ioflag
, vn
->sc_cred
);
242 VOP_UNLOCK(vn
->sc_vp
, 0, p
);
248 vnwrite(dev_t dev
, struct uio
*uio
, int ioflag
)
250 struct proc
* p
= current_proc();
252 struct vn_softc
* vn
;
256 if (vnunit(dev
) >= NVNDEVICE
) {
259 vn
= vn_table
+ unit
;
260 if ((vn
->sc_flags
& VNF_INITED
) == 0) {
263 if (vn
->sc_shadow_vp
!= NULL
) {
266 if (vn
->sc_flags
& VNF_READONLY
) {
270 vn_lock(vn
->sc_vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
271 status
= VOP_WRITE(vn
->sc_vp
, uio
, ioflag
, vn
->sc_cred
);
272 VOP_UNLOCK(vn
->sc_vp
, 0, p
);
278 bp_is_mapped(struct buf
* bp
, vm_offset_t
* vaddr
)
280 boolean_t is_mapped
= FALSE
;
282 if (bp
->b_flags
& B_NEED_IODONE
) {
283 struct buf
* real_bp
= (struct buf
*)bp
->b_real_bp
;
285 if (real_bp
&& real_bp
->b_data
) {
286 *vaddr
= (vm_offset_t
)real_bp
->b_data
;
293 static __inline__
int
294 file_io(struct vnode
* vp
, struct ucred
* cred
,
295 enum uio_rw op
, char * base
, off_t offset
, long count
,
296 struct proc
* p
, long * resid
)
302 bzero(&auio
, sizeof(auio
));
303 aiov
.iov_base
= base
;
304 aiov
.iov_len
= count
;
305 auio
.uio_iov
= &aiov
;
307 auio
.uio_segflg
= UIO_SYSSPACE
;
308 auio
.uio_offset
= offset
;
310 auio
.uio_resid
= count
;
312 vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
314 error
= VOP_READ(vp
, &auio
, IO_SYNC
, cred
);
316 error
= VOP_WRITE(vp
, &auio
, IO_SYNC
, cred
);
317 VOP_UNLOCK(vp
, 0, p
);
318 *resid
= auio
.uio_resid
;
323 shadow_read(struct vn_softc
* vn
, struct buf
* bp
, char * base
, struct proc
* p
)
327 boolean_t read_shadow
;
331 offset
= bp
->b_blkno
;
332 resid
= bp
->b_bcount
/ vn
->sc_secsize
;
340 read_shadow
= shadow_map_read(vn
->sc_shadow_map
,
342 &this_offset
, &this_resid
);
344 vp
= vn
->sc_shadow_vp
;
349 error
= file_io(vp
, vn
->sc_cred
, UIO_READ
, base
+ start
,
350 (off_t
)this_offset
* vn
->sc_secsize
,
351 this_resid
* vn
->sc_secsize
, p
, &temp_resid
);
354 temp_resid
= this_resid
- temp_resid
/ vn
->sc_secsize
;
355 if (temp_resid
== 0) {
356 static int printed
= 0;
357 printf("vn device: shadow_write zero length read (printed %d)\n", printed
);
362 offset
+= temp_resid
;
363 start
+= temp_resid
* vn
->sc_secsize
;;
365 bp
->b_resid
= resid
* vn
->sc_secsize
;
370 shadow_write(struct vn_softc
* vn
, struct buf
* bp
, char * base
,
375 boolean_t shadow_grew
;
379 offset
= bp
->b_blkno
;
380 resid
= bp
->b_bcount
/ vn
->sc_secsize
;
388 shadow_grew
= shadow_map_write(vn
->sc_shadow_map
,
390 &this_offset
, &this_resid
);
394 /* truncate the file to its new length before write */
395 size
= (off_t
)shadow_map_shadow_size(vn
->sc_shadow_map
)
397 vn_lock(vn
->sc_shadow_vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
398 VOP_TRUNCATE(vn
->sc_shadow_vp
, size
,
399 IO_SYNC
, vn
->sc_cred
, p
);
400 VOP_UNLOCK(vn
->sc_shadow_vp
, 0, p
);
403 error
= file_io(vn
->sc_shadow_vp
, vn
->sc_cred
, UIO_WRITE
,
405 (off_t
)this_offset
* vn
->sc_secsize
,
406 this_resid
* vn
->sc_secsize
, p
, &temp_resid
);
410 temp_resid
= this_resid
- temp_resid
/ vn
->sc_secsize
;
411 if (temp_resid
== 0) {
412 static int printed
= 0;
413 printf("vn device: shadow_write zero length write (printed %d)\n", printed
);
418 offset
+= temp_resid
;
419 start
+= temp_resid
* vn
->sc_secsize
;;
421 bp
->b_resid
= resid
* vn
->sc_secsize
;
426 vn_readwrite_io(struct vn_softc
* vn
, struct buf
* bp
)
430 boolean_t need_unmap
= FALSE
;
431 struct proc
* p
= current_proc();
432 vm_offset_t vaddr
= NULL
;
434 if (bp
->b_flags
& B_VECTORLIST
) {
435 if (bp_is_mapped(bp
, &vaddr
) == FALSE
) {
436 if (ubc_upl_map(bp
->b_pagelist
, &vaddr
)
438 panic("vn device: ubc_upl_map failed");
449 iov_base
= (caddr_t
)(vaddr
+ bp
->b_uploffset
);
451 iov_base
= bp
->b_data
;
452 if (vn
->sc_shadow_vp
== NULL
) {
453 error
= file_io(vn
->sc_vp
, vn
->sc_cred
,
454 bp
->b_flags
& B_READ
? UIO_READ
: UIO_WRITE
,
455 iov_base
, (off_t
)bp
->b_blkno
* vn
->sc_secsize
,
456 bp
->b_bcount
, p
, &bp
->b_resid
);
459 if (bp
->b_flags
& B_READ
)
460 error
= shadow_read(vn
, bp
, iov_base
, p
);
462 error
= shadow_write(vn
, bp
, iov_base
, p
);
468 ubc_upl_unmap(bp
->b_pagelist
);
474 vnstrategy(struct buf
*bp
)
478 long sz
; /* in sc_secsize chunks */
480 vn
= vn_table
+ vnunit(bp
->b_dev
);
481 if ((vn
->sc_flags
& VNF_INITED
) == 0) {
483 bp
->b_flags
|= B_ERROR
;
488 bp
->b_resid
= bp
->b_bcount
;
490 * Check for required alignment. Transfers must be a valid
491 * multiple of the sector size.
493 if (bp
->b_bcount
% vn
->sc_secsize
!= 0 ||
494 bp
->b_blkno
% (vn
->sc_secsize
/ DEV_BSIZE
) != 0) {
495 bp
->b_error
= EINVAL
;
496 bp
->b_flags
|= B_ERROR
| B_INVAL
;
500 sz
= howmany(bp
->b_bcount
, vn
->sc_secsize
);
503 * If out of bounds return an error. If at the EOF point,
504 * simply read or write less.
506 if (bp
->b_blkno
>= vn
->sc_size
) {
507 if (bp
->b_blkno
> vn
->sc_size
) {
508 bp
->b_error
= EINVAL
;
509 bp
->b_flags
|= B_ERROR
| B_INVAL
;
515 * If the request crosses EOF, truncate the request.
517 if ((bp
->b_blkno
+ sz
) > vn
->sc_size
) {
518 bp
->b_bcount
= (vn
->sc_size
- bp
->b_blkno
) * vn
->sc_secsize
;
519 bp
->b_resid
= bp
->b_bcount
;
523 error
= vn_readwrite_io(vn
, bp
);
526 bp
->b_flags
|= B_ERROR
;
531 bp
->b_flags
|= B_ERROR
;
532 bp
->b_error
= EINVAL
;
539 vnioctl(dev_t dev
, u_long cmd
, caddr_t data
, int flag
, struct proc
*p
,
543 struct vn_ioctl
*vio
;
552 if (vnunit(dev
) >= NVNDEVICE
) {
555 vn
= vn_table
+ unit
;
556 error
= suser(p
->p_ucred
, &p
->p_acflag
);
560 vio
= (struct vn_ioctl
*)data
;
562 o
= (u_int64_t
*)data
;
565 case DKIOCGETBLOCKSIZE
:
566 case DKIOCSETBLOCKSIZE
:
567 case DKIOCGETMAXBLOCKCOUNTREAD
:
568 case DKIOCGETMAXBLOCKCOUNTWRITE
:
569 case DKIOCGETMAXSEGMENTCOUNTREAD
:
570 case DKIOCGETMAXSEGMENTCOUNTWRITE
:
571 case DKIOCGETMAXSEGMENTBYTECOUNTREAD
:
572 case DKIOCGETMAXSEGMENTBYTECOUNTWRITE
:
573 case DKIOCGETBLOCKCOUNT
:
574 case DKIOCGETBLOCKCOUNT32
:
575 if ((vn
->sc_flags
& VNF_INITED
) == 0) {
583 case DKIOCGETMAXBLOCKCOUNTREAD
:
584 vfs_io_attributes(vn
->sc_vp
, B_READ
, &size
, &num
);
585 *o
= size
/ vn
->sc_secsize
;
587 case DKIOCGETMAXBLOCKCOUNTWRITE
:
588 vfs_io_attributes(vn
->sc_vp
, B_WRITE
, &size
, &num
);
589 *o
= size
/ vn
->sc_secsize
;
591 case DKIOCGETMAXBYTECOUNTREAD
:
592 vfs_io_attributes(vn
->sc_vp
, B_READ
, &size
, &num
);
595 case DKIOCGETMAXBYTECOUNTWRITE
:
596 vfs_io_attributes(vn
->sc_vp
, B_WRITE
, &size
, &num
);
599 case DKIOCGETMAXSEGMENTCOUNTREAD
:
600 vfs_io_attributes(vn
->sc_vp
, B_READ
, &size
, &num
);
603 case DKIOCGETMAXSEGMENTCOUNTWRITE
:
604 vfs_io_attributes(vn
->sc_vp
, B_WRITE
, &size
, &num
);
607 case DKIOCGETMAXSEGMENTBYTECOUNTREAD
:
608 vfs_io_maxsegsize(vn
->sc_vp
, B_READ
, &size
);
611 case DKIOCGETMAXSEGMENTBYTECOUNTWRITE
:
612 vfs_io_maxsegsize(vn
->sc_vp
, B_WRITE
, &size
);
615 case DKIOCGETBLOCKSIZE
:
618 case DKIOCSETBLOCKSIZE
:
620 /* can only set block size on block device */
623 if (vn
->sc_shadow_vp
!= NULL
) {
624 /* can't set the block size if already shadowing */
627 if (*f
< DEV_BSIZE
) {
631 /* recompute the size in terms of the new blocksize */
632 vn
->sc_size
= vn
->sc_fsize
/ vn
->sc_secsize
;
634 case DKIOCISWRITABLE
:
637 case DKIOCGETBLOCKCOUNT32
:
640 case DKIOCGETBLOCKCOUNT
:
644 if (vn
->sc_shadow_vp
!= NULL
) {
647 if (vn
->sc_vp
== NULL
) {
648 /* must be attached before we can shadow */
651 if (vio
->vn_file
== NULL
) {
654 error
= vniocattach_shadow(vn
, vio
, dev
, 0, p
);
659 /* attach only on block device */
662 if (vn
->sc_flags
& VNF_INITED
) {
665 if (vio
->vn_file
== NULL
) {
668 error
= vniocattach_file(vn
, vio
, dev
, 0, p
);
673 /* detach only on block device */
676 /* Note: spec_open won't open a mounted block device */
679 * XXX handle i/o in progress. Return EBUSY, or wait, or
681 * XXX handle multiple opens of the device. Return EBUSY,
682 * or revoke the fd's.
683 * How are these problems handled for removable and failing
684 * hardware devices? (Hint: They are not)
700 vn
->sc_options
|= *f
;
705 vn
->sc_options
&= ~(*f
);
717 vnioctl_chr(dev_t dev
, u_long cmd
, caddr_t data
, int flag
, struct proc
*p
)
719 return (vnioctl(dev
, cmd
, data
, flag
, p
, TRUE
));
723 vnioctl_blk(dev_t dev
, u_long cmd
, caddr_t data
, int flag
, struct proc
*p
)
725 return (vnioctl(dev
, cmd
, data
, flag
, p
, FALSE
));
731 * Attach a file to a VN partition. Return the size in the vn_size
736 vniocattach_file(struct vn_softc
*vn
,
737 struct vn_ioctl
*vio
,
746 flags
= FREAD
|FWRITE
;
748 NDINIT(&nd
, LOOKUP
, FOLLOW
, UIO_SYSSPACE
, vio
->vn_file
, p
);
751 NDINIT(&nd
, LOOKUP
, FOLLOW
, UIO_USERSPACE
, vio
->vn_file
, p
);
753 error
= vn_open(&nd
, flags
, 0);
755 if (error
!= EACCES
&& error
!= EPERM
&& error
!= EROFS
)
759 NDINIT(&nd
, LOOKUP
, FOLLOW
, UIO_SYSSPACE
,
763 NDINIT(&nd
, LOOKUP
, FOLLOW
, UIO_USERSPACE
,
766 error
= vn_open(&nd
, flags
, 0);
770 if (nd
.ni_vp
->v_type
!= VREG
) {
773 else if (ubc_isinuse(nd
.ni_vp
, 1)) {
777 error
= VOP_GETATTR(nd
.ni_vp
, &vattr
, p
->p_ucred
, p
);
780 VOP_UNLOCK(nd
.ni_vp
, 0, p
);
781 (void) vn_close(nd
.ni_vp
, flags
, p
->p_ucred
, p
);
784 vn
->sc_vp
= nd
.ni_vp
;
785 vn
->sc_vp
->v_flag
|= VNOCACHE_DATA
;
786 VOP_UNLOCK(nd
.ni_vp
, 0, p
);
788 vn
->sc_open_flags
= flags
;
791 * If the size is specified, override the file attributes. Note that
792 * the vn_size argument is in PAGE_SIZE sized blocks.
796 vn
->sc_size
= (quad_t
)vio
->vn_size
* PAGE_SIZE
/ vn
->sc_secsize
;
798 vn
->sc_size
= vattr
.va_size
/ vn
->sc_secsize
;
800 vn
->sc_secsize
= DEV_BSIZE
;
801 vn
->sc_fsize
= vattr
.va_size
;
802 vn
->sc_size
= vattr
.va_size
/ vn
->sc_secsize
;
803 error
= vnsetcred(vn
, p
);
805 (void) vn_close(nd
.ni_vp
, flags
, p
->p_ucred
, p
);
809 dev_t cdev
= makedev(vndevice_cdev_major
,
811 vn
->sc_cdev
= devfs_make_node(cdev
, DEVFS_CHAR
,
812 UID_ROOT
, GID_OPERATOR
,
816 vn
->sc_flags
|= VNF_INITED
;
818 vn
->sc_flags
|= VNF_READONLY
;
823 vniocattach_shadow(vn
, vio
, dev
, in_kernel
, p
)
825 struct vn_ioctl
*vio
;
835 flags
= FREAD
|FWRITE
;
837 NDINIT(&nd
, LOOKUP
, FOLLOW
, UIO_SYSSPACE
, vio
->vn_file
, p
);
840 NDINIT(&nd
, LOOKUP
, FOLLOW
, UIO_USERSPACE
, vio
->vn_file
, p
);
842 error
= vn_open(&nd
, flags
, 0);
844 /* shadow MUST be writable! */
847 if (nd
.ni_vp
->v_type
!= VREG
||
848 (error
= VOP_GETATTR(nd
.ni_vp
, &vattr
, p
->p_ucred
, p
))) {
849 VOP_UNLOCK(nd
.ni_vp
, 0, p
);
850 (void) vn_close(nd
.ni_vp
, flags
, p
->p_ucred
, p
);
851 return (error
? error
: EINVAL
);
853 vn
->sc_shadow_vp
= nd
.ni_vp
;
854 vn
->sc_shadow_vp
->v_flag
|= VNOCACHE_DATA
;
855 VOP_UNLOCK(nd
.ni_vp
, 0, p
);
857 map
= shadow_map_create(vn
->sc_fsize
, vattr
.va_size
,
860 (void) vn_close(nd
.ni_vp
, flags
, p
->p_ucred
, p
);
861 vn
->sc_shadow_vp
= NULL
;
864 vn
->sc_shadow_map
= map
;
865 vn
->sc_flags
&= ~VNF_READONLY
; /* we're now read/write */
870 vndevice_root_image(char * path
, char devname
[], dev_t
* dev_p
)
874 struct vn_softc
* vn
;
880 vn
= vn_table
+ ROOT_IMAGE_UNIT
;
881 *dev_p
= makedev(vndevice_bdev_major
,
883 sprintf(devname
, "vn%d", ROOT_IMAGE_UNIT
);
884 error
= vniocattach_file(vn
, &vio
, *dev_p
, 1, current_proc());
889 * Duplicate the current process's credentials. Since we are called only
890 * as the result of a SET ioctl and only root can do that, any future access
891 * to this "disk" is essentially as root. Note that credentials may change
892 * if some other uid can write directly to the mapped file (NFS).
895 vnsetcred(struct vn_softc
*vn
, struct proc
* p
)
899 struct proc
* current_proc();
900 struct ucred
* cred
= p
->p_ucred
;
903 * Set credits in our softc
908 vn
->sc_cred
= crdup(cred
);
911 * Horrible kludge to establish credentials for NFS XXX.
918 tmpbuf
= _MALLOC(vn
->sc_secsize
, M_TEMP
, M_WAITOK
);
919 bzero(&auio
, sizeof(auio
));
921 aiov
.iov_base
= tmpbuf
;
922 aiov
.iov_len
= vn
->sc_secsize
;
923 auio
.uio_iov
= &aiov
;
926 auio
.uio_rw
= UIO_READ
;
927 auio
.uio_segflg
= UIO_SYSSPACE
;
928 auio
.uio_resid
= aiov
.iov_len
;
929 vn_lock(vn
->sc_vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
930 error
= VOP_READ(vn
->sc_vp
, &auio
, 0, vn
->sc_cred
);
931 VOP_UNLOCK(vn
->sc_vp
, 0, p
);
932 FREE(tmpbuf
, M_TEMP
);
938 vnclear(struct vn_softc
*vn
)
941 struct proc
* p
= current_proc(); /* XXX */
943 if (vn
->sc_vp
!= NULL
) {
944 (void)vn_close(vn
->sc_vp
, vn
->sc_open_flags
, vn
->sc_cred
, p
);
947 if (vn
->sc_shadow_vp
!= NULL
) {
948 (void)vn_close(vn
->sc_shadow_vp
, FREAD
| FWRITE
,
950 vn
->sc_shadow_vp
= NULL
;
952 if (vn
->sc_shadow_map
!= NULL
) {
953 shadow_map_free(vn
->sc_shadow_map
);
954 vn
->sc_shadow_map
= NULL
;
956 vn
->sc_flags
= ~(VNF_INITED
| VNF_READONLY
);
964 devfs_remove(vn
->sc_cdev
);
976 if (vnunit(dev
) >= NVNDEVICE
) {
979 vn
= vn_table
+ unit
;
981 if ((vn
->sc_flags
& VNF_INITED
) == 0)
984 return(vn
->sc_secsize
);
/*
 * -1 is passed to bdevsw_add()/cdevsw_add_with_bdev() below; presumably
 * it requests dynamic major-number assignment -- TODO confirm against
 * the devsw API.  The assigned majors land in vndevice_{bdev,cdev}_major.
 */
987 #define CDEV_MAJOR -1
988 #define BDEV_MAJOR -1

/* NOTE(review): looks like a one-time-init guard; its setter is not visible in this chunk -- confirm */
989 static int vndevice_inited = 0;
998 vndevice_bdev_major
= bdevsw_add(BDEV_MAJOR
, &vn_bdevsw
);
1000 if (vndevice_bdev_major
< 0) {
1001 printf("vndevice_init: bdevsw_add() returned %d\n",
1002 vndevice_bdev_major
);
1005 vndevice_cdev_major
= cdevsw_add_with_bdev(CDEV_MAJOR
, &vn_cdevsw
,
1006 vndevice_bdev_major
);
1007 if (vndevice_cdev_major
< 0) {
1008 printf("vndevice_init: cdevsw_add() returned %d\n",
1009 vndevice_cdev_major
);
1012 for (i
= 0; i
< NVNDEVICE
; i
++) {
1013 dev_t dev
= makedev(vndevice_bdev_major
, i
);
1014 vn_table
[i
].sc_bdev
= devfs_make_node(dev
, DEVFS_BLOCK
,
1015 UID_ROOT
, GID_OPERATOR
,
1018 if (vn_table
[i
].sc_bdev
== NULL
)
1019 printf("vninit: devfs_make_node failed!\n");
1022 #endif /* NVNDEVICE */