/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.14 (Berkeley) 5/21/95
 */
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/uio_internal.h>
#include <miscfs/specfs/specdev.h>
#include <vfs/vfs_support.h>
#include <sys/kdebug.h>
struct vnode *speclisth[SPECHSZ];
/* symbolic sleep message strings for devices */
char devopn[] = "devopn";
char devio[] = "devio";
char devwait[] = "devwait";
char devin[] = "devin";
char devout[] = "devout";
char devioc[] = "devioc";
char devcls[] = "devcls";
#define VOPFUNC int (*)(void *)

int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
    { &vnop_default_desc, (VOPFUNC)vn_default_error },
    { &vnop_lookup_desc, (VOPFUNC)spec_lookup },        /* lookup */
    { &vnop_create_desc, (VOPFUNC)err_create },         /* create */
    { &vnop_mknod_desc, (VOPFUNC)err_mknod },           /* mknod */
    { &vnop_open_desc, (VOPFUNC)spec_open },            /* open */
    { &vnop_close_desc, (VOPFUNC)spec_close },          /* close */
    { &vnop_access_desc, (VOPFUNC)spec_access },        /* access */
    { &vnop_getattr_desc, (VOPFUNC)spec_getattr },      /* getattr */
    { &vnop_setattr_desc, (VOPFUNC)spec_setattr },      /* setattr */
    { &vnop_read_desc, (VOPFUNC)spec_read },            /* read */
    { &vnop_write_desc, (VOPFUNC)spec_write },          /* write */
    { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl },          /* ioctl */
    { &vnop_select_desc, (VOPFUNC)spec_select },        /* select */
    { &vnop_revoke_desc, (VOPFUNC)nop_revoke },         /* revoke */
    { &vnop_mmap_desc, (VOPFUNC)err_mmap },             /* mmap */
    { &vnop_fsync_desc, (VOPFUNC)spec_fsync },          /* fsync */
    { &vnop_remove_desc, (VOPFUNC)err_remove },         /* remove */
    { &vnop_link_desc, (VOPFUNC)err_link },             /* link */
    { &vnop_rename_desc, (VOPFUNC)err_rename },         /* rename */
    { &vnop_mkdir_desc, (VOPFUNC)err_mkdir },           /* mkdir */
    { &vnop_rmdir_desc, (VOPFUNC)err_rmdir },           /* rmdir */
    { &vnop_symlink_desc, (VOPFUNC)err_symlink },       /* symlink */
    { &vnop_readdir_desc, (VOPFUNC)err_readdir },       /* readdir */
    { &vnop_readlink_desc, (VOPFUNC)err_readlink },     /* readlink */
    { &vnop_inactive_desc, (VOPFUNC)nop_inactive },     /* inactive */
    { &vnop_reclaim_desc, (VOPFUNC)nop_reclaim },       /* reclaim */
    { &vnop_strategy_desc, (VOPFUNC)spec_strategy },    /* strategy */
    { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf },    /* pathconf */
    { &vnop_advlock_desc, (VOPFUNC)err_advlock },       /* advlock */
    { &vnop_bwrite_desc, (VOPFUNC)spec_bwrite },        /* bwrite */
    { &vnop_pagein_desc, (VOPFUNC)err_pagein },         /* Pagein */
    { &vnop_pageout_desc, (VOPFUNC)err_pageout },       /* Pageout */
    { &vnop_copyfile_desc, (VOPFUNC)err_copyfile },     /* Copyfile */
    { &vnop_blktooff_desc, (VOPFUNC)spec_blktooff },    /* blktooff */
    { &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk },    /* offtoblk */
    { &vnop_blockmap_desc, (VOPFUNC)spec_blockmap },    /* blockmap */
    { (struct vnodeop_desc *)NULL, (int (*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
    { &spec_vnodeop_p, spec_vnodeop_entries };
static void set_blocksize(vnode_t, dev_t);
/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(ap)
    struct vnop_lookup_args /* {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
        vfs_context_t a_context;
    } */ *ap;
{
    *ap->a_vpp = NULL;
    return (ENOTDIR);
}
static void
set_blocksize(struct vnode *vp, dev_t dev)
{
    int (*size)(dev_t);
    int rsize;

    if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
        rsize = (*size)(dev);
        if (rsize <= 0)            /* did size fail? */
            vp->v_specsize = DEV_BSIZE;
        else
            vp->v_specsize = rsize;
    } else
        vp->v_specsize = DEV_BSIZE;
}
void
set_fsblocksize(struct vnode *vp)
{
    if (vp->v_type == VBLK) {
        dev_t dev = (dev_t)vp->v_rdev;
        int maj = major(dev);

        if ((u_int)maj >= (u_int)nblkdev)
            return;

        set_blocksize(vp, dev);
    }
}
/*
 * Open a special file.
 */
int
spec_open(ap)
    struct vnop_open_args /* {
        struct vnode *a_vp;
        int  a_mode;
        vfs_context_t a_context;
    } */ *ap;
{
    struct proc *p = vfs_context_proc(ap->a_context);
    kauth_cred_t cred = vfs_context_ucred(ap->a_context);
    struct vnode *vp = ap->a_vp;
    dev_t bdev, dev = (dev_t)vp->v_rdev;
    int maj = major(dev);
    int error;

    /*
     * Don't allow open if fs is mounted -nodev.
     */
    if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
        return (ENXIO);

    switch (vp->v_type) {

    case VCHR:
        if ((u_int)maj >= (u_int)nchrdev)
            return (ENXIO);
        if (cred != FSCRED && (ap->a_mode & FWRITE)) {
            /*
             * When running in very secure mode, do not allow
             * opens for writing of any disk character devices.
             */
            if (securelevel >= 2 && isdisk(dev, VCHR))
                return (EPERM);
            /*
             * When running in secure mode, do not allow opens
             * for writing of /dev/mem, /dev/kmem, or character
             * devices whose corresponding block devices are
             * currently mounted.
             */
            if (securelevel >= 1) {
                if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error))
                    return (error);
            }
        }
        if (cdevsw[maj].d_type == D_TTY) {
            vp->v_flag |= VISTTY;
        }
        error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
        return (error);

    case VBLK:
        if ((u_int)maj >= (u_int)nblkdev)
            return (ENXIO);
        /*
         * When running in very secure mode, do not allow
         * opens for writing of any disk block devices.
         */
        if (securelevel >= 2 && cred != FSCRED &&
            (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
            return (EPERM);
        /*
         * Do not allow opens of block devices that are
         * currently mounted.
         */
        if ( (error = vfs_mountedon(vp)) )
            return (error);

        error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
        if (!error) {
            u_int64_t blkcnt;
            u_int32_t blksize;
            int setsize = 0;
            u_int32_t size512 = 512;

            if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) {
                /* Switch to 512 byte sectors (temporarily) */
                if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) {
                    /* Get the number of 512 byte physical blocks. */
                    if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) {
                        setsize = 1;
                    }
                }
                /* If it doesn't set back, we can't recover */
                if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context))
                    error = ENXIO;
            }
            set_blocksize(vp, dev);

            /*
             * Cache the size in bytes of the block device for later
             * use by spec_write().
             */
            if (setsize)
                vp->v_specdevsize = blkcnt * (u_int64_t)size512;
            else
                vp->v_specdevsize = (u_int64_t)0;    /* Default: Can't get */
        }
        return (error);

    default:
        panic("spec_open type");
    }
    return (0);
}
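/*
 * Example (user space, illustrative only): the same DKIOC* probe that
 * spec_open() performs above, used to size a device.  spec_open() temporarily
 * switches the device to 512-byte sectors so the count it caches is in
 * 512-byte units; the sketch below simply reads the native block size and
 * block count.  The device path is hypothetical.
 */
#if 0   /* example only, not compiled into the kernel */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/disk.h>

int
main(void)
{
    uint32_t blksize = 0;
    uint64_t blkcnt = 0;
    int fd = open("/dev/disk0", O_RDONLY);

    if (fd < 0)
        return 1;

    if (ioctl(fd, DKIOCGETBLOCKSIZE, &blksize) == 0 &&
        ioctl(fd, DKIOCGETBLOCKCOUNT, &blkcnt) == 0)
        printf("block size %u, %llu blocks, %llu bytes\n",
            blksize, (unsigned long long)blkcnt,
            (unsigned long long)blksize * blkcnt);

    close(fd);
    return 0;
}
#endif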
int
spec_read(ap)
    struct vnop_read_args /* {
        struct vnode *a_vp;
        struct uio *a_uio;
        int  a_ioflag;
        vfs_context_t a_context;
    } */ *ap;
{
    register struct vnode *vp = ap->a_vp;
    register struct uio *uio = ap->a_uio;
    struct buf *bp;
    daddr64_t bn, nextbn;
    long bsize, bscale;
    int devBlockSize = 0;
    int n, on;
    int error = 0;

    if (uio->uio_rw != UIO_READ)
        panic("spec_read mode");
    if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
        panic("spec_read proc");

    if (uio_resid(uio) == 0)
        return (0);

    switch (vp->v_type) {

    case VCHR:
        error = (*cdevsw[major(vp->v_rdev)].d_read)
            (vp->v_rdev, uio, ap->a_ioflag);
        return (error);

    case VBLK:
        if (uio->uio_offset < 0)
            return (EINVAL);

        devBlockSize = vp->v_specsize;
        if (devBlockSize > PAGE_SIZE)
            return (EINVAL);

        bscale = PAGE_SIZE / devBlockSize;
        bsize = bscale * devBlockSize;

        do {
            on = uio->uio_offset % bsize;
            bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ (bscale - 1));

            if (vp->v_speclastr + bscale == bn) {
                nextbn = bn + bscale;
                error = buf_breadn(vp, bn, (int)bsize, &nextbn,
                           (int *)&bsize, 1, NOCRED, &bp);
            } else
                error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp);

            vp->v_speclastr = bn;

            n = bsize - buf_resid(bp);
            if ((on > n) || error) {
                if (!error)
                    error = EINVAL;
                buf_brelse(bp);
                return (error);
            }
            // LP64todo - fix this!
            n = min((unsigned)(n - on), uio_resid(uio));

            error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
            buf_brelse(bp);
        } while (error == 0 && uio_resid(uio) > 0 && n != 0);
        return (error);

    default:
        panic("spec_read type");
    }
    return (0);
}
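/*
 * Worked example of the block arithmetic in the VBLK read loop above
 * (illustrative only, assuming a 512-byte device block size and a
 * 4096-byte PAGE_SIZE):
 *
 *	devBlockSize = 512, PAGE_SIZE = 4096
 *	bscale = 4096 / 512 = 8		(device blocks per page)
 *	bsize  = 8 * 512   = 4096	(transfer size: one page)
 *
 * For uio_offset = 10240:
 *	on = 10240 % 4096 = 2048	(byte offset within the 4 KB buffer)
 *	bn = (10240 / 512) & ~(8 - 1) = 20 & ~7 = 16
 *					(device block number rounded down to a
 *					 page-aligned block boundary)
 *
 * So the loop reads device blocks 16..23 into one 4 KB buffer and copies
 * out starting 2048 bytes into it.
 */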
int
spec_write(ap)
    struct vnop_write_args /* {
        struct vnode *a_vp;
        struct uio *a_uio;
        int  a_ioflag;
        vfs_context_t a_context;
    } */ *ap;
{
    register struct vnode *vp = ap->a_vp;
    register struct uio *uio = ap->a_uio;
    struct buf *bp;
    daddr64_t bn;
    int bsize, blkmask, bscale;
    register int io_sync;
    register int io_size;
    int devBlockSize = 0;
    int n, on;
    int error = 0;

    if (uio->uio_rw != UIO_WRITE)
        panic("spec_write mode");
    if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
        panic("spec_write proc");

    switch (vp->v_type) {

    case VCHR:
        error = (*cdevsw[major(vp->v_rdev)].d_write)
            (vp->v_rdev, uio, ap->a_ioflag);
        return (error);

    case VBLK:
        if (uio_resid(uio) == 0)
            return (0);
        if (uio->uio_offset < 0)
            return (EINVAL);

        io_sync = (ap->a_ioflag & IO_SYNC);
        // LP64todo - fix this!
        io_size = uio_resid(uio);

        devBlockSize = vp->v_specsize;
        if (devBlockSize > PAGE_SIZE)
            return (EINVAL);

        bscale = PAGE_SIZE / devBlockSize;
        blkmask = bscale - 1;
        bsize = bscale * devBlockSize;

        do {
            bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ blkmask);
            on = uio->uio_offset % bsize;

            // LP64todo - fix this!
            n = min((unsigned)(bsize - on), uio_resid(uio));

            /*
             * Use buf_getblk() as an optimization IFF:
             *
             * 1) We are reading exactly a block on a block
             *    aligned boundary
             * 2) We know the size of the device from spec_open
             * 3) The read doesn't span the end of the device
             *
             * Otherwise, we fall back on buf_bread().
             */
            if (n == bsize &&
                vp->v_specdevsize != (u_int64_t)0 &&
                (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
                /* reduce the size of the read to what is there */
                n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
            }
            if (n == bsize)
                bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
            else
                error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp);

            /* Translate downstream error for upstream, if needed */
            if (!error)
                error = (int)buf_error(bp);
            if (error) {
                buf_brelse(bp);
                return (error);
            }
            n = min(n, bsize - buf_resid(bp));

            error = uiomove((char *)buf_dataptr(bp) + on, n, uio);

            if (io_sync)
                error = buf_bwrite(bp);
            else {
                if ((n + on) == bsize)
                    error = buf_bawrite(bp);
                else
                    error = buf_bdwrite(bp);
            }
        } while (error == 0 && uio_resid(uio) > 0 && n != 0);
        return (error);

    default:
        panic("spec_write type");
    }
    return (0);
}
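/*
 * Illustrative sketch of the buf_getblk()/buf_bread() decision documented in
 * the write loop above, pulled out into a helper for clarity.  The helper
 * name is hypothetical (it does not exist in xnu); it only shows the shape of
 * the fast path (full, block-aligned write: no need to read the old contents)
 * versus the slow path (partial block: read-modify-write).
 */
#if 0   /* example only, not compiled */
static int
get_write_buf(vnode_t vp, daddr64_t bn, int bsize, int n, buf_t *bpp)
{
    int error = 0;

    if (n == bsize) {
        /* Whole block will be overwritten: just grab an empty buffer. */
        *bpp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
    } else {
        /* Partial block: read the existing contents first. */
        error = (int)buf_bread(vp, bn, bsize, NOCRED, bpp);
    }
    return (error);
}
#endif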
/*
 * Device ioctl operation.
 */
int
spec_ioctl(ap)
    struct vnop_ioctl_args /* {
        struct vnode *a_vp;
        u_long a_command;
        caddr_t a_data;
        int  a_fflag;
        vfs_context_t a_context;
    } */ *ap;
{
    proc_t p = vfs_context_proc(ap->a_context);
    dev_t dev = ap->a_vp->v_rdev;

    switch (ap->a_vp->v_type) {

    case VCHR:
        return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
            ap->a_fflag, p));

    case VBLK:
        if (ap->a_command == 0 && (int)ap->a_data == B_TAPE) {
            if (bdevsw[major(dev)].d_type == D_TAPE)
                return (0);
            else
                return (1);
        }
        return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
            ap->a_fflag, p));
    }
    return (0);
}
int
spec_select(ap)
    struct vnop_select_args /* {
        struct vnode *a_vp;
        int  a_which;
        int  a_fflags;
        void *a_wql;
        vfs_context_t a_context;
    } */ *ap;
{
    proc_t p = vfs_context_proc(ap->a_context);
    register dev_t dev;

    switch (ap->a_vp->v_type) {

    default:
        return (1);        /* XXX */

    case VCHR:
        dev = ap->a_vp->v_rdev;
        return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, p);
    }
}
/*
 * Synch buffers associated with a block device
 */
int
spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context)
{
    if (vp->v_type == VCHR)
        return (0);
    /*
     * Flush all dirty buffers associated with a block device.
     */
    buf_flushdirtyblks(vp, waitfor == MNT_WAIT, 0, (char *)"spec_fsync");

    return (0);
}

int
spec_fsync(ap)
    struct vnop_fsync_args /* {
        struct vnode *a_vp;
        int  a_waitfor;
        vfs_context_t a_context;
    } */ *ap;
{
    return spec_fsync_internal(ap->a_vp, ap->a_waitfor, ap->a_context);
}
/*
 * Just call the device strategy routine
 */
extern int hard_throttle_on_root;

#define LOWPRI_DELAY_MSECS   200
#define LOWPRI_WINDOW_MSECS  200

int lowpri_IO_window_msecs = LOWPRI_WINDOW_MSECS;
int lowpri_IO_delay_msecs = LOWPRI_DELAY_MSECS;

struct timeval last_normal_IO_timestamp;
struct timeval last_lowpri_IO_timestamp;
struct timeval lowpri_IO_window = { 0, LOWPRI_WINDOW_MSECS * 1000 };
int
spec_strategy(ap)
    struct vnop_strategy_args /* {
        struct buf *a_bp;
    } */ *ap;
{
    buf_t bp;
    int bflags;
    dev_t bdev;
    proc_t p;
    struct uthread *ut;
    int code = 0;
    struct timeval elapsed;

    bp = ap->a_bp;
    bdev = buf_device(bp);
    bflags = buf_flags(bp);

    if (bflags & B_ASYNC)
        code |= DKIO_ASYNC;

    if (bflags & B_META)
        code |= DKIO_META;
    else if (bflags & B_PAGEIO)
        code |= DKIO_PAGING;

    KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
        (unsigned int)bp, bdev, (int)buf_blkno(bp), buf_count(bp), 0);

    if (((bflags & (B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
        (buf_vnode(bp)->v_mount->mnt_kern_flag & MNTK_ROOTDEV))
        hard_throttle_on_root = 1;

    if ( lowpri_IO_delay_msecs && lowpri_IO_window_msecs ) {
        p = current_proc();

        if ( (p == NULL) || !(p->p_lflag & P_LLOW_PRI_IO)) {
            if (!(p->p_lflag & P_LBACKGROUND_IO))
                microuptime(&last_normal_IO_timestamp);
        } else {
            microuptime(&last_lowpri_IO_timestamp);

            elapsed = last_lowpri_IO_timestamp;
            timevalsub(&elapsed, &last_normal_IO_timestamp);

            lowpri_IO_window.tv_sec  = lowpri_IO_window_msecs / 1000;
            lowpri_IO_window.tv_usec = (lowpri_IO_window_msecs % 1000) * 1000;

            if (timevalcmp(&elapsed, &lowpri_IO_window, <)) {
                /*
                 * I'd really like to do the IOSleep here, but
                 * we may be holding all kinds of filesystem related locks
                 * and the pages for this I/O marked 'busy'...
                 * we don't want to cause a normal task to block on
                 * one of these locks while we're throttling a task marked
                 * for low priority I/O... we'll mark the uthread and
                 * do the delay just before we return from the system
                 * call that triggered this I/O or from vnode_pagein
                 */
                ut = get_bsdthread_info(current_thread());
                ut->uu_lowpri_delay = lowpri_IO_delay_msecs;
            }
        }
    }
    (*bdevsw[major(bdev)].d_strategy)(bp);

    return (0);
}
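/*
 * Illustrative sketch of the low-priority I/O throttle window test above,
 * reduced to plain timeval arithmetic.  The function name and the use of the
 * userland timersub()/timercmp() macros are assumptions for the sketch; the
 * real code uses microuptime()/timevalsub()/timevalcmp() on the globals
 * defined above.
 */
#if 0   /* example only, not compiled */
#include <sys/time.h>

static int
should_throttle_lowpri_io(struct timeval last_normal, struct timeval now,
    int window_msecs)
{
    struct timeval elapsed, window;

    /* elapsed = now - last_normal (time since the last normal-priority I/O) */
    timersub(&now, &last_normal, &elapsed);

    window.tv_sec  = window_msecs / 1000;
    window.tv_usec = (window_msecs % 1000) * 1000;

    /* throttle if a normal-priority I/O was issued within the window */
    return timercmp(&elapsed, &window, <);
}
#endif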
/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_blockmap(__unused struct vnop_blockmap_args *ap)
{
    return (ENOTSUP);
}
/*
 * Device close routine
 */
int
spec_close(ap)
    struct vnop_close_args /* {
        struct vnode *a_vp;
        int  a_fflag;
        vfs_context_t a_context;
    } */ *ap;
{
    register struct vnode *vp = ap->a_vp;
    dev_t dev = vp->v_rdev;
    int (*devclose)(dev_t, int, int, struct proc *);
    int mode, error;
    struct proc *p = vfs_context_proc(ap->a_context);

    switch (vp->v_type) {

    case VCHR:
        /*
         * Hack: a tty device that is a controlling terminal
         * has a reference from the session structure.
         * We cannot easily tell that a character device is
         * a controlling terminal, unless it is the closing
         * process' controlling terminal.  In that case,
         * if the reference count is 2 (this last descriptor
         * plus the session), release the reference from the session.
         */
        if (vcount(vp) == 2 && p &&
            vp == p->p_session->s_ttyvp) {
            p->p_session->s_ttyvp = NULL;
        }
        /*
         * close on last reference.
         */
        if (vcount(vp) > 1)
            return (0);
        devclose = cdevsw[major(dev)].d_close;
        mode = S_IFCHR;
        break;

    case VBLK:
#ifdef DEVFS_IMPLEMENTS_LOCKING
        /*
         * On last close of a block device (that isn't mounted)
         * we must invalidate any in core blocks, so that
         * we can, for instance, change floppy disks.
         */
        if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
            return (error);

        error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
        if (error)
            return (error);
        /*
         * Since every use (buffer, vnode, swap, blockmap)
         * holds a reference to the vnode, and because we mark
         * any other vnodes that alias this device, when the
         * sum of the reference counts on all the aliased
         * vnodes descends to one, we are on last close.
         */
        if (vcount(vp) > 1)
            return (0);
#else /* DEVFS_IMPLEMENTS_LOCKING */
        /*
         * Since every use (buffer, vnode, swap, blockmap)
         * holds a reference to the vnode, and because we mark
         * any other vnodes that alias this device, when the
         * sum of the reference counts on all the aliased
         * vnodes descends to one, we are on last close.
         */
        if (vcount(vp) > 1)
            return (0);

        /*
         * On last close of a block device (that isn't mounted)
         * we must invalidate any in core blocks, so that
         * we can, for instance, change floppy disks.
         */
        if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
            return (error);

        error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
        if (error)
            return (error);
#endif /* DEVFS_IMPLEMENTS_LOCKING */
        devclose = bdevsw[major(dev)].d_close;
        mode = S_IFBLK;
        break;

    default:
        panic("spec_close: not special");
    }

    return ((*devclose)(dev, ap->a_fflag, mode, p));
}
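/*
 * Worked example of the reference accounting behind the vcount() checks in
 * spec_close() above (counts are hypothetical, for illustration only):
 *
 *   A session leader has its controlling terminal open once:
 *       last open descriptor .......... 1 reference
 *       session's s_ttyvp pointer ..... 1 reference    -> vcount(vp) == 2
 *
 *   When that last descriptor is closed, vcount(vp) is still 2 because of the
 *   session reference, so the hack above drops s_ttyvp first; only when the
 *   combined count on the aliased vnodes falls to one does the device's
 *   d_close entry point actually get called.
 */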
/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(ap)
    struct vnop_pathconf_args /* {
        struct vnode *a_vp;
        int  a_name;
        int *a_retval;
        vfs_context_t a_context;
    } */ *ap;
{
    switch (ap->a_name) {
    case _PC_LINK_MAX:
        *ap->a_retval = LINK_MAX;
        return (0);
    case _PC_MAX_CANON:
        *ap->a_retval = MAX_CANON;
        return (0);
    case _PC_MAX_INPUT:
        *ap->a_retval = MAX_INPUT;
        return (0);
    case _PC_PIPE_BUF:
        *ap->a_retval = PIPE_BUF;
        return (0);
    case _PC_CHOWN_RESTRICTED:
        *ap->a_retval = 1;
        return (0);
    case _PC_VDISABLE:
        *ap->a_retval = _POSIX_VDISABLE;
        return (0);
    default:
        return (EINVAL);
    }
    /* NOTREACHED */
}
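/*
 * Example (user space, illustrative only): querying these limits on a device
 * node with fpathconf(2).  Depending on the filesystem holding the node, the
 * query may be served by spec_pathconf() above.  The device path is
 * hypothetical.
 */
#if 0   /* example only, not compiled into the kernel */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int
main(void)
{
    int fd = open("/dev/tty", O_RDONLY);

    if (fd < 0)
        return 1;

    printf("MAX_INPUT = %ld\n", fpathconf(fd, _PC_MAX_INPUT));
    printf("PIPE_BUF  = %ld\n", fpathconf(fd, _PC_PIPE_BUF));

    close(fd);
    return 0;
}
#endif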
/*
 * Special device failed operation
 */
int
spec_ebadf(__unused void *dummy)
{
    return (EBADF);
}

/*
 * Special device bad operation
 */
int
spec_badop(void)
{
    panic("spec_badop called");
    /* NOTREACHED */
}
/* Blktooff derives file offset from logical block number */
int
spec_blktooff(ap)
    struct vnop_blktooff_args /* {
        struct vnode *a_vp;
        daddr64_t a_lblkno;
        off_t *a_offset;
    } */ *ap;
{
    register struct vnode *vp = ap->a_vp;

    switch (vp->v_type) {
    case VCHR:
        *ap->a_offset = (off_t)-1;    /* failure */
        return (ENOTSUP);

    case VBLK:
        printf("spec_blktooff: not implemented for VBLK\n");
        *ap->a_offset = (off_t)-1;    /* failure */
        return (ENOTSUP);

    default:
        panic("spec_blktooff type");
    }
    return (0);
}
/* Offtoblk derives logical block number from file offset */
int
spec_offtoblk(ap)
    struct vnop_offtoblk_args /* {
        struct vnode *a_vp;
        off_t a_offset;
        daddr64_t *a_lblkno;
    } */ *ap;
{
    register struct vnode *vp = ap->a_vp;

    switch (vp->v_type) {
    case VCHR:
        *ap->a_lblkno = (daddr64_t)-1;    /* failure */
        return (ENOTSUP);

    case VBLK:
        printf("spec_offtoblk: not implemented for VBLK\n");
        *ap->a_lblkno = (daddr64_t)-1;    /* failure */
        return (ENOTSUP);

    default:
        panic("spec_offtoblk type");
    }
    return (0);
}