/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.14 (Berkeley) 5/21/95
 */
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/uio_internal.h>
#include <miscfs/specfs/specdev.h>
#include <vfs/vfs_support.h>
#include <sys/kdebug.h>
struct vnode *speclisth[SPECHSZ];
/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
#define VOPFUNC int (*)(void *)

int (**spec_vnodeop_p)(void *);
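
/*
 * Operation vector for special (device) vnodes: each VNOP descriptor below is
 * paired with its specfs handler.  Namespace operations that have no meaning
 * for a device node (create, mknod, rename, mkdir, readdir, ...) are wired to
 * the generic err_* stubs, and revoke/inactive/reclaim to the nop_* stubs.
 */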
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vnop_default_desc, (VOPFUNC)vn_default_error },
	{ &vnop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vnop_create_desc, (VOPFUNC)err_create },		/* create */
	{ &vnop_mknod_desc, (VOPFUNC)err_mknod },		/* mknod */
	{ &vnop_open_desc, (VOPFUNC)spec_open },		/* open */
	{ &vnop_close_desc, (VOPFUNC)spec_close },		/* close */
	{ &vnop_access_desc, (VOPFUNC)spec_access },		/* access */
	{ &vnop_getattr_desc, (VOPFUNC)spec_getattr },		/* getattr */
	{ &vnop_setattr_desc, (VOPFUNC)spec_setattr },		/* setattr */
	{ &vnop_read_desc, (VOPFUNC)spec_read },		/* read */
	{ &vnop_write_desc, (VOPFUNC)spec_write },		/* write */
	{ &vnop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vnop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vnop_revoke_desc, (VOPFUNC)nop_revoke },		/* revoke */
	{ &vnop_mmap_desc, (VOPFUNC)err_mmap },			/* mmap */
	{ &vnop_fsync_desc, (VOPFUNC)spec_fsync },		/* fsync */
	{ &vnop_remove_desc, (VOPFUNC)err_remove },		/* remove */
	{ &vnop_link_desc, (VOPFUNC)err_link },			/* link */
	{ &vnop_rename_desc, (VOPFUNC)err_rename },		/* rename */
	{ &vnop_mkdir_desc, (VOPFUNC)err_mkdir },		/* mkdir */
	{ &vnop_rmdir_desc, (VOPFUNC)err_rmdir },		/* rmdir */
	{ &vnop_symlink_desc, (VOPFUNC)err_symlink },		/* symlink */
	{ &vnop_readdir_desc, (VOPFUNC)err_readdir },		/* readdir */
	{ &vnop_readlink_desc, (VOPFUNC)err_readlink },		/* readlink */
	{ &vnop_inactive_desc, (VOPFUNC)nop_inactive },		/* inactive */
	{ &vnop_reclaim_desc, (VOPFUNC)nop_reclaim },		/* reclaim */
	{ &vnop_strategy_desc, (VOPFUNC)spec_strategy },	/* strategy */
	{ &vnop_pathconf_desc, (VOPFUNC)spec_pathconf },	/* pathconf */
	{ &vnop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
	{ &vnop_bwrite_desc, (VOPFUNC)spec_bwrite },		/* bwrite */
	{ &vnop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vnop_pagein_desc, (VOPFUNC)err_pagein },		/* Pagein */
	{ &vnop_pageout_desc, (VOPFUNC)err_pageout },		/* Pageout */
	{ &vnop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copyfile */
	{ &vnop_blktooff_desc, (VOPFUNC)spec_blktooff },	/* blktooff */
	{ &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk },	/* offtoblk */
	{ &vnop_blockmap_desc, (VOPFUNC)spec_blockmap },	/* blockmap */
	{ (struct vnodeop_desc *)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
static void set_blocksize(vnode_t, dev_t);
/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(ap)
	struct vnop_lookup_args /* {
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		vfs_context_t a_context;
	} */ *ap;
{
	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
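
/*
 * set_blocksize() queries the block driver's d_psize entry point, if one is
 * registered, and uses a positive result as the vnode's device block size
 * (v_specsize); anything else falls back to DEV_BSIZE.
 */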
static void
set_blocksize(struct vnode *vp, dev_t dev)
{
	int (*size)(dev_t);
	int rsize;

	if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
		rsize = (*size)(dev);
		if (rsize <= 0)			/* did size fail? */
			vp->v_specsize = DEV_BSIZE;
		else
			vp->v_specsize = rsize;
	} else
		vp->v_specsize = DEV_BSIZE;
}
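
/*
 * set_fsblocksize() lets a filesystem refresh the cached device block size on
 * a block-device vnode; character devices and out-of-range major numbers are
 * silently ignored.
 */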
void
set_fsblocksize(struct vnode *vp)
{
	if (vp->v_type == VBLK) {
		dev_t dev = (dev_t)vp->v_rdev;
		int maj = major(dev);

		if ((u_int)maj >= (u_int)nblkdev)
			return;

		set_blocksize(vp, dev);
	}
}
/*
 * Open a special file.
 */
int
spec_open(ap)
	struct vnop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		vfs_context_t a_context;
	} */ *ap;
{
	struct proc *p = vfs_context_proc(ap->a_context);
	kauth_cred_t cred = vfs_context_ucred(ap->a_context);
	struct vnode *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;
	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
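		/*
		 * Character devices: validate the major number, apply the
		 * securelevel restrictions below, then hand the open off to
		 * the driver's d_open entry point.
		 */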
		if ((u_int)maj >= (u_int)nchrdev)
			return (ENXIO);

		if (cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && isdisk(dev, VCHR))
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    check_mountedon(bdev, VBLK, &error))
					return (error);
			}
		}
		if (cdevsw[maj].d_type == D_TTY) {
			vp->v_flag |= VISTTY;
		}
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= (u_int)nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)))
			return (error);

		error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
		if (!error) {
			u_int64_t blkcnt;
			u_int32_t blksize;
			int setsize = 0;
			u_int32_t size512 = 512;

			if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) {
				/* Switch to 512 byte sectors (temporarily) */
				if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) {
					/* Get the number of 512 byte physical blocks. */
					if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) {
						setsize = 1;
					}
				}
				/* If it doesn't set back, we can't recover */
				if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context))
					error = ENXIO;
			}
			set_blocksize(vp, dev);

			/*
			 * Cache the size in bytes of the block device for later
			 * use by spec_write().
			 */
			if (setsize)
				vp->v_specdevsize = blkcnt * (u_int64_t)size512;
			else
				vp->v_specdevsize = (u_int64_t)0;	/* Default: Can't get */
		}
		return (error);

	default:
		panic("spec_open type");
	}
	return (0);
}
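
/*
 * Reads and writes on a block special vnode (below) go through the buffer
 * cache in page-sized chunks; character devices are passed straight to the
 * driver's d_read/d_write entry points.
 */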
/*
 * Vnode op for read
 */
int
spec_read(ap)
	struct vnop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct buf *bp;
	daddr64_t bn, nextbn;
	long bsize, bscale;
	int devBlockSize = 0;
	int n, on;
	int error = 0;
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
		panic("spec_read proc");

	if (uio_resid(uio) == 0)
		return (0);
	switch (vp->v_type) {

	case VCHR:
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);

		devBlockSize = vp->v_specsize;

		if (devBlockSize > PAGE_SIZE)
			return (EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		bsize = bscale * devBlockSize;
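
		/*
		 * Each pass of the loop below covers one page worth of device
		 * blocks: for example, a 512-byte sector size with 4 KB pages
		 * gives bscale = 8 and bsize = 4096, so bn is always rounded
		 * down to a page-aligned run of eight device blocks.
		 */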
		do {
			on = uio->uio_offset % bsize;

			bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ (bscale - 1));

			if (vp->v_speclastr + bscale == bn) {
				nextbn = bn + bscale;
				error = buf_breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp);

			vp->v_speclastr = bn;

			n = bsize - buf_resid(bp);
			if ((on > n) || error) {
				if (!error)
					error = EINVAL;
				buf_brelse(bp);
				return (error);
			}
			// LP64todo - fix this!
			n = min((unsigned)(n - on), uio_resid(uio));

			error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
			if (n + on == bsize)
				buf_markaged(bp);
			buf_brelse(bp);
		} while (error == 0 && uio_resid(uio) > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
	return (0);
}
/*
 * Vnode op for write
 */
int
spec_write(ap)
	struct vnop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct buf *bp;
	daddr64_t bn;
	int bsize, blkmask, bscale;
	register int io_sync;
	register int io_size;
	int devBlockSize = 0;
	int n, on;
	int error = 0;
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
		panic("spec_write proc");
	switch (vp->v_type) {

	case VCHR:
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		return (error);

	case VBLK:
		if (uio_resid(uio) == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		io_sync = (ap->a_ioflag & IO_SYNC);
		// LP64todo - fix this!
		io_size = uio_resid(uio);

		devBlockSize = vp->v_specsize;
		if (devBlockSize > PAGE_SIZE)
			return (EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		blkmask = bscale - 1;
		bsize = bscale * devBlockSize;
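
		/*
		 * Example (assuming 512-byte sectors and 4 KB pages, so
		 * bscale = 8 and blkmask = 7): an offset of 6144 yields
		 * bn = 8 and on = 2048, i.e. only the tail half of the
		 * second page-sized block is rewritten below.
		 */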
		do {
			bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ blkmask);
			on = uio->uio_offset % bsize;

			// LP64todo - fix this!
			n = min((unsigned)(bsize - on), uio_resid(uio));

			/*
			 * Use buf_getblk() as an optimization IFF:
			 *
			 * 1)	We are reading exactly a block on a block
			 *	aligned boundary
			 * 2)	We know the size of the device from spec_open
			 * 3)	The read doesn't span the end of the device
			 *
			 * Otherwise, we fall back on buf_bread().
			 */
			if (n == bsize &&
			    vp->v_specdevsize != (u_int64_t)0 &&
			    (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
				/* reduce the size of the read to what is there */
				n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
			}

			if (n == bsize)
				bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
			else
				error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp);

			/* Translate downstream error for upstream, if needed */
			if (!error)
				error = (int)buf_error(bp);
			if (error) {
				buf_brelse(bp);
				return (error);
			}
			n = min(n, bsize - buf_resid(bp));

			error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
			if (error) {
				buf_brelse(bp);
				return (error);
			}

			if (io_sync)
				error = buf_bwrite(bp);
			else {
				if ((n + on) == bsize)
					error = buf_bawrite(bp);
				else
					error = buf_bdwrite(bp);
			}
		} while (error == 0 && uio_resid(uio) > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
	return (0);
}
/*
 * Device ioctl operation.
 */
int
spec_ioctl(ap)
	struct vnop_ioctl_args /* {
		struct vnode *a_vp;
		int  a_command;
		caddr_t  a_data;
		int  a_fflag;
		vfs_context_t a_context;
	} */ *ap;
{
	proc_t p = vfs_context_proc(ap->a_context);
	dev_t dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
			ap->a_fflag, p));

	case VBLK:
		if (ap->a_command == 0 && (int)ap->a_data == B_TAPE) {
			if (bdevsw[major(dev)].d_type == D_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
			ap->a_fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
	return (0);
}
int
spec_select(ap)
	struct vnop_select_args /* {
		struct vnode *a_vp;
		int  a_which;
		int  a_fflags;
		void *a_wql;
		vfs_context_t a_context;
	} */ *ap;
{
	proc_t p = vfs_context_proc(ap->a_context);
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, p);
	}
}
/*
 * Synch buffers associated with a block device
 */
int
spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context)
{
	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	buf_flushdirtyblks(vp, waitfor == MNT_WAIT, 0, (char *)"spec_fsync");

	return (0);
}
int
spec_fsync(ap)
	struct vnop_fsync_args /* {
		struct vnode *a_vp;
		int  a_waitfor;
		vfs_context_t a_context;
	} */ *ap;
{
	return spec_fsync_internal(ap->a_vp, ap->a_waitfor, ap->a_context);
}
/*
 * Just call the device strategy routine
 */
extern int hard_throttle_on_root;

#define LOWPRI_DELAY_MSECS	200
#define LOWPRI_WINDOW_MSECS	200

int	lowpri_IO_window_msecs = LOWPRI_WINDOW_MSECS;
int	lowpri_IO_delay_msecs  = LOWPRI_DELAY_MSECS;

struct timeval last_normal_IO_timestamp;
struct timeval last_lowpri_IO_timestamp;
struct timeval lowpri_IO_window = { 0, LOWPRI_WINDOW_MSECS * 1000 };
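
/*
 * Throttling state for low-priority I/O: spec_strategy() timestamps normal
 * and low-priority requests, and when a low-priority request lands within
 * lowpri_IO_window_msecs of normal traffic, the issuing uthread is tagged to
 * delay for lowpri_IO_delay_msecs on its way back out of the kernel (see the
 * comment inside spec_strategy()).
 */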
int
spec_strategy(ap)
	struct vnop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	buf_t bp;
	int bflags;
	dev_t bdev;
	proc_t p;
	struct uthread *ut;
	int code = 0;
	struct timeval elapsed;

	bp = ap->a_bp;
	bdev = buf_device(bp);
	bflags = buf_flags(bp);

	if (bflags & B_ASYNC)
		code |= DKIO_ASYNC;
	else if (bflags & B_PAGEIO)
		code |= DKIO_PAGING;

	KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
		(unsigned int)bp, bdev, (int)buf_blkno(bp), buf_count(bp), 0);

	if (((bflags & (B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
	    (buf_vnode(bp)->v_mount->mnt_kern_flag & MNTK_ROOTDEV))
		hard_throttle_on_root = 1;
	if (lowpri_IO_delay_msecs && lowpri_IO_window_msecs) {
		p = current_proc();

		if ((p == NULL) || !(p->p_lflag & P_LLOW_PRI_IO)) {
			if (!(p->p_lflag & P_LBACKGROUND_IO))
				microuptime(&last_normal_IO_timestamp);
		} else {
			microuptime(&last_lowpri_IO_timestamp);

			elapsed = last_lowpri_IO_timestamp;
			timevalsub(&elapsed, &last_normal_IO_timestamp);

			lowpri_IO_window.tv_sec  = lowpri_IO_window_msecs / 1000;
			lowpri_IO_window.tv_usec = (lowpri_IO_window_msecs % 1000) * 1000;

			if (timevalcmp(&elapsed, &lowpri_IO_window, <)) {
				/*
				 * I'd really like to do the IOSleep here, but
				 * we may be holding all kinds of filesystem related locks
				 * and the pages for this I/O marked 'busy'...
				 * we don't want to cause a normal task to block on
				 * one of these locks while we're throttling a task marked
				 * for low priority I/O... we'll mark the uthread and
				 * do the delay just before we return from the system
				 * call that triggered this I/O or from vnode_pagein
				 */
				ut = get_bsdthread_info(current_thread());
				ut->uu_lowpri_delay = lowpri_IO_delay_msecs;
			}
		}
	}
	(*bdevsw[major(bdev)].d_strategy)(bp);

	return (0);
}
/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_blockmap(__unused struct vnop_blockmap_args *ap)
{
	return (ENOTSUP);
}
/*
 * Device close routine
 */
int
spec_close(ap)
	struct vnop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, error;
	struct proc *p = vfs_context_proc(ap->a_context);
	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && p &&
		    vp == p->p_session->s_ttyvp) {
			p->p_session->s_ttyvp = NULL;
			vnode_rele(vp);
		}
		/*
		 * close on last reference.
		 */
		if (vcount(vp) > 1)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
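		/*
		 * Both arms of the DEVFS_IMPLEMENTS_LOCKING conditional below
		 * do the same work (flush dirty buffers, invalidate cached
		 * blocks, bail out unless this is the last reference); they
		 * differ only in whether the last-close vcount() check happens
		 * before or after the flush.
		 */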
#ifdef DEVFS_IMPLEMENTS_LOCKING
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
			return (error);

		error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
		if (error)
			return (error);
		/*
		 * Since every use (buffer, vnode, swap, blockmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1)
			return (0);
#else /* DEVFS_IMPLEMENTS_LOCKING */
		/*
		 * Since every use (buffer, vnode, swap, blockmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1)
			return (0);

		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
			return (error);

		error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
		if (error)
			return (error);
#endif /* DEVFS_IMPLEMENTS_LOCKING */
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;
	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, p));
}
/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(ap)
	struct vnop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
		vfs_context_t a_context;
	} */ *ap;
{
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
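
/*
 * spec_devblocksize() reports the sector size cached in v_specsize, which
 * set_blocksize() established when the device was opened.
 */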
int
spec_devblocksize(ap)
	struct vnop_devblocksize_args /* {
		struct vnode *a_vp;
		int *a_retval;
	} */ *ap;
{
	*ap->a_retval = (ap->a_vp->v_specsize);
	return (0);
}
/*
 * Special device failed operation
 */
int
spec_ebadf(__unused void *dummy)
{
	return (EBADF);
}

/*
 * Special device bad operation
 */
int
spec_badop()
{
	panic("spec_badop called");
	/* NOTREACHED */
}
/* Blktooff derives file offset from logical block number */
int
spec_blktooff(ap)
	struct vnop_blktooff_args /* {
		struct vnode *a_vp;
		daddr64_t a_lblkno;
		off_t *a_offset;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_offset = (off_t)-1;	/* failure */
		return (ENOTSUP);

	case VBLK:
		printf("spec_blktooff: not implemented for VBLK\n");
		*ap->a_offset = (off_t)-1;	/* failure */
		return (ENOTSUP);

	default:
		panic("spec_blktooff type");
	}
	/* NOTREACHED */
	return (0);
}
/* Offtoblk derives logical block number from file offset */
int
spec_offtoblk(ap)
	struct vnop_offtoblk_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		daddr64_t *a_lblkno;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_lblkno = (daddr64_t)-1;	/* failure */
		return (ENOTSUP);

	case VBLK:
		printf("spec_offtoblk: not implemented for VBLK\n");
		*ap->a_lblkno = (daddr64_t)-1;	/* failure */
		return (ENOTSUP);

	default:
		panic("spec_offtoblk type");
	}
	/* NOTREACHED */
	return (0);
}