/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.14 (Berkeley) 5/21/95
 */
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/disk.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/specfs/lockf.h>
#include <vfs/vfs_support.h>

#include <sys/kdebug.h>
struct vnode *speclisth[SPECHSZ];
/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
#define VOPFUNC int (*)(void *)

int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)err_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)err_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)spec_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)spec_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)spec_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)spec_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)spec_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)spec_read },			/* read */
	{ &vop_write_desc, (VOPFUNC)spec_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)nop_lease },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)nop_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)err_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)spec_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)err_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)err_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)err_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)err_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)err_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)err_rmdir },		/* rmdir */
	{ &vop_symlink_desc, (VOPFUNC)err_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)err_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)err_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)err_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)nop_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)nop_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)nop_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)nop_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)spec_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)spec_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)nop_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)err_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)err_valloc },		/* valloc */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)nop_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)nop_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)spec_bwrite },		/* bwrite */
	{ &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vop_pagein_desc, (VOPFUNC)err_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)err_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)spec_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)spec_offtoblk },		/* offtoblk */
	{ &vop_cmap_desc, (VOPFUNC)spec_cmap },			/* cmap */
	{ (struct vnodeop_desc *)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
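/*
 * Note (added, not in the original source): the table above is the specfs
 * dispatch vector.  Descriptors wired to err_* handlers simply fail, because
 * the operation makes no sense on a device node (create, mkdir, readdir, ...);
 * descriptors wired to nop_* handlers succeed without doing any work; and the
 * spec_* routines defined below supply the real device behaviour.  A VOP_READ()
 * issued against a special-file vnode is dispatched through spec_vnodeop_p and
 * lands in spec_read() below.
 */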
/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
void
set_blocksize(struct vnode *vp, dev_t dev)
{
	int (*size)(dev_t);
	int rsize;

	if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
		rsize = (*size)(dev);
		if (rsize <= 0)		/* did size fail? */
			vp->v_specsize = DEV_BSIZE;
		else
			vp->v_specsize = rsize;
	} else
		vp->v_specsize = DEV_BSIZE;
}

void
set_fsblocksize(struct vnode *vp)
{

	if (vp->v_type == VBLK) {
		dev_t dev = (dev_t)vp->v_rdev;
		int maj = major(dev);

		if ((u_int)maj >= nblkdev)
			return;

		set_blocksize(vp, dev);
	}
}
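/*
 * Note (added, not in the original source): set_blocksize() records the
 * device's natural block size in vp->v_specsize.  The value comes from the
 * driver's d_psize entry point when one is registered and reports a positive
 * size; otherwise DEV_BSIZE (512 bytes) is used.  set_fsblocksize() is the
 * convenience wrapper for block-device vnodes.  A hypothetical caller might
 * look like the sketch below, where fs_dev_vp is an assumed name for a
 * filesystem's block-device vnode:
 *
 *	if (fs_dev_vp->v_type == VBLK)
 *		set_fsblocksize(fs_dev_vp);
 */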
/*
 * Open a special file.
 */
int
spec_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct proc *p = ap->a_p;
	struct vnode *bvp, *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && isdisk(dev, VCHR))
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if (error = vfs_mountedon(vp))
			return (error);
		error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
		if (!error) {
			u_int64_t blkcnt;
			u_int32_t blksize;

			set_blocksize(vp, dev);

			/*
			 * Cache the size in bytes of the block device for later
			 * use by spec_write().
			 */
			vp->v_specdevsize = (u_int64_t)0;	/* Default: Can't get */
			if (!VOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, NOCRED, p)) {
				/* Switch to 512 byte sectors (temporarily) */
				u_int32_t size512 = 512;

				if (!VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, NOCRED, p)) {
					/* Get the number of 512 byte physical blocks. */
					if (!VOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, NOCRED, p)) {
						vp->v_specdevsize = blkcnt * (u_int64_t)size512;
					}
					/* If it doesn't set back, we can't recover */
					if (VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, NOCRED, p))
						error = ENXIO;
				}
			}
		}
		return (error);
	}
	return (0);
}
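/*
 * Worked example (added, not in the original source) for the device-size
 * caching at the end of spec_open() above: if DKIOCGETBLOCKSIZE reports a
 * 2048-byte block size and, after temporarily switching to 512-byte sectors,
 * DKIOCGETBLOCKCOUNT reports 204800 blocks, then v_specdevsize is cached as
 * 204800 * 512 = 104857600 bytes before the original 2048-byte block size is
 * restored with DKIOCSETBLOCKSIZE.
 */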
/*
 * Vnode op for read
 */
int
spec_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale;
	int devBlockSize = 0;
	int n, on, majordev, (*ioctl)();
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc())
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);

		devBlockSize = vp->v_specsize;

		if (devBlockSize > PAGE_SIZE)
			return (EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		bsize = bscale * devBlockSize;

		do {
			on = uio->uio_offset % bsize;

			bn = (uio->uio_offset / devBlockSize) &~ (bscale - 1);

			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);

			vp->v_lastr = bn;
			n = bsize - bp->b_resid;
			if ((on > n) || error) {
				if (!error)
					error = EINVAL;
				brelse(bp);
				return (error);
			}
			n = min((unsigned)(n - on), uio->uio_resid);

			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
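/*
 * Worked example (added, not in the original source) for the block arithmetic
 * in spec_read() above: with v_specsize = 512 and PAGE_SIZE = 4096, bscale is
 * 8 and bsize is 4096.  A read at uio_offset 6000 gives
 * on = 6000 % 4096 = 1904 and bn = (6000 / 512) & ~7 = 11 & ~7 = 8, so the
 * transfer starts 1904 bytes into the 4 KB buffer that begins at device
 * block 8.  If the previous buffer started at block 0 (v_lastr + bscale == bn),
 * breadn() also initiates read-ahead of the next cluster at block 16.
 */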
/*
 * Vnode op for write
 */
int
spec_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask, bscale;
	register int io_sync;
	register int io_size;
	int devBlockSize = 0;
	register int n, on;
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc())
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);

		io_sync = (ap->a_ioflag & IO_SYNC);
		io_size = uio->uio_resid;

		devBlockSize = vp->v_specsize;
		if (devBlockSize > PAGE_SIZE)
			return (EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		blkmask = bscale - 1;
		bsize = bscale * devBlockSize;

		do {
			bn = (uio->uio_offset / devBlockSize) &~ blkmask;
			on = uio->uio_offset % bsize;

			n = min((unsigned)(bsize - on), uio->uio_resid);

			/*
			 * Use getblk() as an optimization IFF:
			 *
			 * 1)	We are reading exactly a block on a block
			 *	aligned boundary
			 * 2)	We know the size of the device from spec_open
			 * 3)	The read doesn't span the end of the device
			 *
			 * Otherwise, we fall back on bread().
			 */
			if (n == bsize &&
			    vp->v_specdevsize != (u_int64_t)0 &&
			    (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
				/* reduce the size of the read to what is there */
				n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
			}
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);

			/* Translate downstream error for upstream, if needed */
			if (!error && (bp->b_flags & B_ERROR) != 0) {
				error = bp->b_error;
			}
			if (error) {
				brelse(bp);
				return (error);
			}
			n = min(n, bsize - bp->b_resid);

			error = uiomove((char *)bp->b_data + on, n, uio);

			bp->b_flags |= B_AGE;

			if (io_sync)
				bwrite(bp);
			else {
				if ((n + on) == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
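/*
 * Note (added, not in the original source) on the getblk()/bread() choice in
 * spec_write() above: when the write covers a whole bsize block (n == bsize),
 * getblk() hands back a buffer without reading the device, since every byte
 * will be overwritten anyway; a partial-block write must first bread() the
 * block so the untouched bytes are preserved.  For example, with bsize = 4096,
 * a 4096-byte write at offset 8192 takes the getblk() path and is queued with
 * bawrite(), while a 1024-byte write at offset 8192 takes the bread() path and
 * is queued with bdwrite() because (n + on) != bsize, unless IO_SYNC forces a
 * synchronous bwrite() in either case.
 */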
/*
 * Device ioctl operation.
 */
int
spec_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int  a_command;
		caddr_t  a_data;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	dev_t dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		if (ap->a_command == 0 && (int)ap->a_data == B_TAPE)
			if (bdevsw[major(dev)].d_type == D_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}
int
spec_select(ap)
	struct vop_select_args /* {
		struct vnode *a_vp;
		int  a_which;
		int  a_fflags;
		struct ucred *a_cred;
		void *a_wql;
		struct proc *a_p;
	} */ *ap;
{
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, ap->a_p);
	}
}
/*
 * Synch buffers associated with a block device
 */
int
spec_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int  a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		// XXXdbg - don't flush locked blocks.  they may be journaled.
		if ((bp->b_flags & B_BUSY) || (bp->b_flags & B_LOCKED))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "spec_fsync", 0);
		}
#if DIAGNOSTIC
		if (vp->v_dirtyblkhd.lh_first) {
			vprint("spec_fsync: dirty", vp);
			splx(s);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}
/*
 * Just call the device strategy routine
 */
int
spec_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp;
	extern int hard_throttle_on_root;

	bp = ap->a_bp;

	if (kdebug_enable) {
		int code = 0;

		if (bp->b_flags & B_READ)
			code |= DKIO_READ;
		if (bp->b_flags & B_ASYNC)
			code |= DKIO_ASYNC;

		if (bp->b_flags & B_META)
			code |= DKIO_META;
		else if (bp->b_flags & (B_PGIN | B_PAGEOUT))
			code |= DKIO_PAGING;

		KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
			(unsigned int)bp, bp->b_dev, bp->b_blkno, bp->b_bcount, 0);
	}
	if ((bp->b_flags & B_PGIN) && (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV))
		hard_throttle_on_root = 1;

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}
/*
 * Advisory record locking support
 */
int
spec_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (ap->a_vp->v_specinfo->si_lockf == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = ap->a_vp->v_specinfo->si_devsize + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else if (fl->l_len > 0)
		end = start + fl->l_len - 1;
	else { /* l_len is negative */
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	lock->lf_specinfo = ap->a_vp->v_specinfo;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		return (spec_lf_setlock(lock));

	case F_UNLCK:
		error = spec_lf_clearlock(lock);
		break;

	case F_GETLK:
		error = spec_lf_getlock(lock, fl);
		break;

	default:
		error = EINVAL;
		break;
	}
	_FREE(lock, M_LOCKF);
	return (error);
}
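/*
 * Worked example (added, not in the original source) for the flock conversion
 * in spec_advlock() above: a caller passing l_whence = SEEK_END,
 * l_start = -1024 and l_len = 1024 against a device whose si_devsize is
 * 1048576 gets start = 1048576 + (-1024) = 1047552 and
 * end = start + 1024 - 1 = 1048575, i.e. a lock over the last 1 KB of the
 * device; l_len = 0 would instead lock from start through the end of the
 * device (end = -1).
 */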
/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * (PAGE_SIZE / ap->a_vp->v_specsize);
	if (ap->a_runp != NULL)
		*ap->a_runp = (MAXPHYSIO / PAGE_SIZE) - 1;
	return (0);
}
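/*
 * Worked example (added, not in the original source) for the translation in
 * spec_bmap() above: with a 4096-byte PAGE_SIZE and a 512-byte v_specsize,
 * each page-sized logical block spans 8 device blocks, so logical block 3 is
 * reported as device block 3 * 8 = 24, and *a_runp advertises
 * (MAXPHYSIO / PAGE_SIZE) - 1 additional contiguous blocks, since a device is
 * contiguous by definition.
 */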
/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_cmap(ap)
	struct vop_cmap_args /* {
		struct vnode *a_vp;
		off_t a_foffset;
		size_t a_size;
		daddr_t *a_bpn;
		size_t *a_run;
		void *a_poff;
	} */ *ap;
{
	/* Assumed behaviour: the original body was not preserved in this copy. */
	return (EOPNOTSUPP);
}
/*
 * Device close routine
 */
int
spec_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			ap->a_p->p_session->s_ttyvp = NULL;
			vrele(vp);
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
#ifdef DEVFS_IMPLEMENTS_LOCKING
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
#else /* DEVFS_IMPLEMENTS_LOCKING */
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);

		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
#endif /* DEVFS_IMPLEMENTS_LOCKING */
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}
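/*
 * Note (added, not in the original source) on the reference counting in
 * spec_close() above: a controlling tty normally carries one reference from
 * the closing descriptor and one from the session, so vcount() == 2 at last
 * close; dropping the session's s_ttyvp reference first lets the subsequent
 * vcount() test fall through to the real cdevsw d_close.  For any other
 * aliased device vnode, the close is skipped (return 0) until the combined
 * reference count drops to one, unless the vnode is being revoked (VXLOCK).
 */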
/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
		minor(ap->a_vp->v_rdev));
	return (0);
}
/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
int
spec_devblocksize(ap)
	struct vop_devblocksize_args /* {
		struct vnode *a_vp;
		int *a_retval;
	} */ *ap;
{
	*ap->a_retval = (ap->a_vp->v_specsize);
	return (0);
}
/*
 * Special device failed operation
 */
int
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
int
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
/* Blktooff derives file offset from logical block number */
int
spec_blktooff(ap)
	struct vop_blktooff_args /* {
		struct vnode *a_vp;
		daddr_t a_lblkno;
		off_t *a_offset;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_offset = (off_t)-1;	/* failure */
		return (EOPNOTSUPP);

	case VBLK:
		printf("spec_blktooff: not implemented for VBLK\n");
		*ap->a_offset = (off_t)-1;	/* failure */
		return (EOPNOTSUPP);

	default:
		panic("spec_blktooff type");
	}
	/* NOTREACHED */
}
/* Offtoblk derives logical block number from file offset */
int
spec_offtoblk(ap)
	struct vop_offtoblk_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		daddr_t *a_lblkno;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_lblkno = (daddr_t)-1;	/* failure */
		return (EOPNOTSUPP);

	case VBLK:
		printf("spec_offtoblk: not implemented for VBLK\n");
		*ap->a_lblkno = (daddr_t)-1;	/* failure */
		return (EOPNOTSUPP);

	default:
		panic("spec_offtoblk type");
	}
	/* NOTREACHED */
}