/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.14 (Berkeley) 5/21/95
 */
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/uio_internal.h>
#include <miscfs/specfs/specdev.h>
#include <vfs/vfs_support.h>

#include <sys/kdebug.h>
struct vnode *speclisth[SPECHSZ];
/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
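/*
 * spec_vnodeop_entries is the vnode operation dispatch table for special
 * files: each VNOP descriptor is paired with a specfs handler (or a generic
 * err_ / nop_ stub) cast through VOPFUNC, and spec_vnodeop_opv_desc
 * associates that table with the spec_vnodeop_p operations vector.
 */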
#define VOPFUNC int (*)(void *)

int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vnop_default_desc, (VOPFUNC)vn_default_error },
	{ &vnop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vnop_create_desc, (VOPFUNC)err_create },		/* create */
	{ &vnop_mknod_desc, (VOPFUNC)err_mknod },		/* mknod */
	{ &vnop_open_desc, (VOPFUNC)spec_open },		/* open */
	{ &vnop_close_desc, (VOPFUNC)spec_close },		/* close */
	{ &vnop_access_desc, (VOPFUNC)spec_access },		/* access */
	{ &vnop_getattr_desc, (VOPFUNC)spec_getattr },		/* getattr */
	{ &vnop_setattr_desc, (VOPFUNC)spec_setattr },		/* setattr */
	{ &vnop_read_desc, (VOPFUNC)spec_read },		/* read */
	{ &vnop_write_desc, (VOPFUNC)spec_write },		/* write */
	{ &vnop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vnop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vnop_revoke_desc, (VOPFUNC)nop_revoke },		/* revoke */
	{ &vnop_mmap_desc, (VOPFUNC)err_mmap },			/* mmap */
	{ &vnop_fsync_desc, (VOPFUNC)spec_fsync },		/* fsync */
	{ &vnop_remove_desc, (VOPFUNC)err_remove },		/* remove */
	{ &vnop_link_desc, (VOPFUNC)err_link },			/* link */
	{ &vnop_rename_desc, (VOPFUNC)err_rename },		/* rename */
	{ &vnop_mkdir_desc, (VOPFUNC)err_mkdir },		/* mkdir */
	{ &vnop_rmdir_desc, (VOPFUNC)err_rmdir },		/* rmdir */
	{ &vnop_symlink_desc, (VOPFUNC)err_symlink },		/* symlink */
	{ &vnop_readdir_desc, (VOPFUNC)err_readdir },		/* readdir */
	{ &vnop_readlink_desc, (VOPFUNC)err_readlink },		/* readlink */
	{ &vnop_inactive_desc, (VOPFUNC)nop_inactive },		/* inactive */
	{ &vnop_reclaim_desc, (VOPFUNC)nop_reclaim },		/* reclaim */
	{ &vnop_strategy_desc, (VOPFUNC)spec_strategy },	/* strategy */
	{ &vnop_pathconf_desc, (VOPFUNC)spec_pathconf },	/* pathconf */
	{ &vnop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
	{ &vnop_bwrite_desc, (VOPFUNC)spec_bwrite },		/* bwrite */
	{ &vnop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vnop_pagein_desc, (VOPFUNC)err_pagein },		/* Pagein */
	{ &vnop_pageout_desc, (VOPFUNC)err_pageout },		/* Pageout */
	{ &vnop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copyfile */
	{ &vnop_blktooff_desc, (VOPFUNC)spec_blktooff },	/* blktooff */
	{ &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk },	/* offtoblk */
	{ &vnop_blockmap_desc, (VOPFUNC)spec_blockmap },	/* blockmap */
	{ (struct vnodeop_desc *)NULL, (int (*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
static void set_blocksize(vnode_t, dev_t);
/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(ap)
	struct vnop_lookup_args /* {
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		vfs_context_t a_context;
	} */ *ap;
{
	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
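/*
 * set_blocksize() consults the block driver's d_psize entry point and
 * caches its (positive) result in v_specsize, falling back to DEV_BSIZE
 * when the driver has no d_psize routine or it reports a non-positive
 * size.  set_fsblocksize() is the wrapper applied to block-device vnodes.
 */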
static void
set_blocksize(struct vnode *vp, dev_t dev)
{
	int (*size)(dev_t);
	int rsize;

	if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
		rsize = (*size)(dev);
		if (rsize <= 0)			/* did size fail? */
			vp->v_specsize = DEV_BSIZE;
		else
			vp->v_specsize = rsize;
	} else
		vp->v_specsize = DEV_BSIZE;
}
void
set_fsblocksize(struct vnode *vp)
{
	if (vp->v_type == VBLK) {
		dev_t dev = (dev_t)vp->v_rdev;
		int maj = major(dev);

		if ((u_int)maj >= (u_int)nblkdev)
			return;

		set_blocksize(vp, dev);
	}
}
/*
 * Open a special file.
 */
int
spec_open(ap)
	struct vnop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		vfs_context_t a_context;
	} */ *ap;
{
	struct proc *p = vfs_context_proc(ap->a_context);
	kauth_cred_t cred = vfs_context_ucred(ap->a_context);
	struct vnode *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= (u_int)nchrdev)
			return (ENXIO);
		if (cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && isdisk(dev, VCHR))
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    check_mountedon(bdev, VBLK, &error))
					return (error);
			}
		}
		if (cdevsw[maj].d_type == D_TTY) {
			vp->v_flag |= VISTTY;
		}
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= (u_int)nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ( (error = vfs_mountedon(vp)) )
			return (error);

		error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
		if (!error) {
			u_int64_t blkcnt;
			u_int32_t blksize;
			int setsize = 0;
			u_int32_t size512 = 512;

			if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) {
				/* Switch to 512 byte sectors (temporarily) */
				if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) {
					/* Get the number of 512 byte physical blocks. */
					if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context))
						setsize = 1;
				}
				/* If it doesn't set back, we can't recover */
				if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context))
					error = ENXIO;
			}
			set_blocksize(vp, dev);

			/*
			 * Cache the size in bytes of the block device for later
			 * use by spec_write().
			 */
			if (setsize)
				vp->v_specdevsize = blkcnt * (u_int64_t)size512;
			else
				vp->v_specdevsize = (u_int64_t)0;	/* Default: Can't get */
		}
		return (error);

	default:
		panic("spec_open type");
	}
	return (0);
}
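/*
 * Vnode op for read.  Character devices hand the uio straight to the
 * driver's d_read routine; block devices are read through the buffer
 * cache one PAGE_SIZE-aligned block group at a time, with a one-group
 * read-ahead (buf_breadn) when v_speclastr shows the access is sequential.
 */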
int
spec_read(ap)
	struct vnop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct buf *bp;
	daddr64_t bn, nextbn;
	long bsize, bscale;
	int devBlockSize = 0;
	int n, on;
	int error = 0;

	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
		panic("spec_read proc");

	if (uio_resid(uio) == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);

		devBlockSize = vp->v_specsize;

		if (devBlockSize > PAGE_SIZE)
			return (EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		bsize = bscale * devBlockSize;

		do {
			on = uio->uio_offset % bsize;

			bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ (bscale - 1));

			if (vp->v_speclastr + bscale == bn) {
				nextbn = bn + bscale;
				error = buf_breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp);

			vp->v_speclastr = bn;

			n = bsize - buf_resid(bp);
			if ((on > n) || error) {
				if (!error)
					error = EINVAL;
				buf_brelse(bp);
				return (error);
			}
			// LP64todo - fix this!
			n = min((unsigned)(n - on), uio_resid(uio));

			error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
			buf_brelse(bp);
		} while (error == 0 && uio_resid(uio) > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
	return (0);
}
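/*
 * Vnode op for write.  Character devices go straight to the driver's
 * d_write routine; block devices are written read-modify-write through
 * the buffer cache, using buf_getblk() when a whole block is being
 * overwritten and buf_bread() otherwise, then pushing the block out
 * synchronously, asynchronously, or delayed depending on IO_SYNC and
 * whether the block was filled to its end.
 */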
int
spec_write(ap)
	struct vnop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct buf *bp;
	daddr64_t bn;
	int bsize, blkmask, bscale;
	register int io_sync;
	register int io_size;
	int devBlockSize = 0;
	register int n, on;
	int error = 0;

	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
		panic("spec_write proc");

	switch (vp->v_type) {

	case VCHR:
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		return (error);

	case VBLK:
		if (uio_resid(uio) == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);

		io_sync = (ap->a_ioflag & IO_SYNC);
		// LP64todo - fix this!
		io_size = uio_resid(uio);

		devBlockSize = vp->v_specsize;
		if (devBlockSize > PAGE_SIZE)
			return (EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		blkmask = bscale - 1;
		bsize = bscale * devBlockSize;

		do {
			bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ blkmask);
			on = uio->uio_offset % bsize;

			// LP64todo - fix this!
			n = min((unsigned)(bsize - on), uio_resid(uio));

			/*
			 * Use buf_getblk() as an optimization IFF:
			 *
			 * 1)	We are reading exactly a block on a block
			 *	aligned boundary
			 * 2)	We know the size of the device from spec_open
			 * 3)	The read doesn't span the end of the device
			 *
			 * Otherwise, we fall back on buf_bread().
			 */
			if (n == bsize &&
			    vp->v_specdevsize != (u_int64_t)0 &&
			    (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
				/* reduce the size of the read to what is there */
				n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
			}

			if (n == bsize)
				bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
			else
				error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp);

			/* Translate downstream error for upstream, if needed */
			if (!error)
				error = (int)buf_error(bp);
			if (error) {
				buf_brelse(bp);
				return (error);
			}
			n = min(n, bsize - buf_resid(bp));

			error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
			if (error) {
				buf_brelse(bp);
				return (error);
			}

			if (io_sync)
				error = buf_bwrite(bp);
			else {
				if ((n + on) == bsize)
					error = buf_bawrite(bp);
				else
					error = buf_bdwrite(bp);
			}
		} while (error == 0 && uio_resid(uio) > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
	return (0);
}
/*
 * Device ioctl operation.
 */
int
spec_ioctl(ap)
	struct vnop_ioctl_args /* {
		struct vnode *a_vp;
		int a_command;
		caddr_t a_data;
		int a_fflag;
		vfs_context_t a_context;
	} */ *ap;
{
	proc_t p = vfs_context_proc(ap->a_context);
	dev_t dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, p));

	case VBLK:
		if (ap->a_command == 0 && (int)ap->a_data == B_TAPE) {
			if (bdevsw[major(dev)].d_type == D_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
	return (0);
}
int
spec_select(ap)
	struct vnop_select_args /* {
		struct vnode *a_vp;
		int a_which;
		int a_fflags;
		void *a_wql;
		vfs_context_t a_context;
	} */ *ap;
{
	proc_t p = vfs_context_proc(ap->a_context);
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, p);
	}
}
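/*
 * Note that only character devices get a real select through the driver's
 * d_select entry point; every other vnode type simply reports ready
 * (the return (1) marked XXX above).
 */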
/*
 * Synch buffers associated with a block device
 */
int
spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context)
{
	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	buf_flushdirtyblks(vp, waitfor == MNT_WAIT, 0, (char *)"spec_fsync");

	return (0);
}

int
spec_fsync(ap)
	struct vnop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		vfs_context_t a_context;
	} */ *ap;
{
	return spec_fsync_internal(ap->a_vp, ap->a_waitfor, ap->a_context);
}
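/*
 * The lowpri_IO_* globals below implement a simple throttle for threads
 * marked for low priority I/O: spec_strategy() records timestamps of the
 * most recent normal and low-priority I/Os, and when a low-priority thread
 * issues I/O within lowpri_IO_window_msecs of normal I/O activity, the
 * issuing uthread is tagged (uu_lowpri_delay) so that it sleeps for
 * lowpri_IO_delay_msecs on its way back out of the kernel rather than
 * blocking here while holding filesystem locks.
 */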
/*
 * Just call the device strategy routine
 */
extern int hard_throttle_on_root;

#define LOWPRI_DELAY_MSECS	200
#define LOWPRI_WINDOW_MSECS	200

int	lowpri_IO_window_msecs = LOWPRI_WINDOW_MSECS;
int	lowpri_IO_delay_msecs = LOWPRI_DELAY_MSECS;

struct timeval last_normal_IO_timestamp;
struct timeval last_lowpri_IO_timestamp;
struct timeval lowpri_IO_window = { 0, LOWPRI_WINDOW_MSECS * 1000 };

int
spec_strategy(ap)
	struct vnop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	buf_t	bp;
	int	bflags;
	dev_t	bdev;
	proc_t	p;
	struct uthread *ut;
	int	code = 0;
	struct timeval elapsed;

	bp = ap->a_bp;
	bdev = buf_device(bp);
	bflags = buf_flags(bp);

	if (bflags & B_READ)
		code |= DKIO_READ;
	if (bflags & B_ASYNC)
		code |= DKIO_ASYNC;

	if (bflags & B_META)
		code |= DKIO_META;
	else if (bflags & B_PAGEIO)
		code |= DKIO_PAGING;

	KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
		(unsigned int)bp, bdev, (int)buf_blkno(bp), buf_count(bp), 0);

	if (((bflags & (B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
	    (buf_vnode(bp)->v_mount->mnt_kern_flag & MNTK_ROOTDEV))
		hard_throttle_on_root = 1;

	if ( lowpri_IO_delay_msecs && lowpri_IO_window_msecs ) {
		p = current_proc();

		if ( (p == NULL) || !(p->p_lflag & P_LLOW_PRI_IO)) {
			if (p && !(p->p_lflag & P_LBACKGROUND_IO))
				microuptime(&last_normal_IO_timestamp);
		} else {
			microuptime(&last_lowpri_IO_timestamp);

			elapsed = last_lowpri_IO_timestamp;
			timevalsub(&elapsed, &last_normal_IO_timestamp);

			lowpri_IO_window.tv_sec  = lowpri_IO_window_msecs / 1000;
			lowpri_IO_window.tv_usec = (lowpri_IO_window_msecs % 1000) * 1000;

			if (timevalcmp(&elapsed, &lowpri_IO_window, <)) {
				/*
				 * I'd really like to do the IOSleep here, but
				 * we may be holding all kinds of filesystem related locks
				 * and the pages for this I/O marked 'busy'...
				 * we don't want to cause a normal task to block on
				 * one of these locks while we're throttling a task marked
				 * for low priority I/O... we'll mark the uthread and
				 * do the delay just before we return from the system
				 * call that triggered this I/O or from vnode_pagein
				 */
				ut = get_bsdthread_info(current_thread());
				ut->uu_lowpri_delay = lowpri_IO_delay_msecs;
			}
		}
	}
	(*bdevsw[major(bdev)].d_strategy)(bp);

	return (0);
}
/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_blockmap(__unused struct vnop_blockmap_args *ap)
{
	return (ENOTSUP);
}
/*
 * Device close routine
 */
int
spec_close(ap)
	struct vnop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, error;
	struct proc *p = vfs_context_proc(ap->a_context);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && p &&
		    vp == p->p_session->s_ttyvp) {
			p->p_session->s_ttyvp = NULL;
			vnode_rele(vp);
		}
		/*
		 * close on last reference.
		 */
		if (vcount(vp) > 1)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
#ifdef DEVFS_IMPLEMENTS_LOCKING
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
			return (error);

		error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
		if (error)
			return (error);
		/*
		 * Since every use (buffer, vnode, swap, blockmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1)
			return (0);
#else /* DEVFS_IMPLEMENTS_LOCKING */
		/*
		 * Since every use (buffer, vnode, swap, blockmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1)
			return (0);

		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
			return (error);

		error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
		if (error)
			return (error);
#endif /* DEVFS_IMPLEMENTS_LOCKING */
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, p));
}
/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(ap)
	struct vnop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
		vfs_context_t a_context;
	} */ *ap;
{
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
int
spec_devblocksize(ap)
	struct vnop_devblocksize_args /* {
		struct vnode *a_vp;
		int *a_retval;
	} */ *ap;
{
	*ap->a_retval = (ap->a_vp->v_specsize);
	return (0);
}
/*
 * Special device failed operation
 */
int
spec_ebadf(__unused void *dummy)
{
	return (EBADF);
}
/*
 * Special device bad operation
 */
int
spec_badop(void)
{
	panic("spec_badop called");
	/* NOTREACHED */
}
/* Blktooff derives file offset from logical block number */
int
spec_blktooff(ap)
	struct vnop_blktooff_args /* {
		struct vnode *a_vp;
		daddr64_t a_lblkno;
		off_t *a_offset;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_offset = (off_t)-1;	/* failure */
		return (ENOTSUP);

	case VBLK:
		printf("spec_blktooff: not implemented for VBLK\n");
		*ap->a_offset = (off_t)-1;	/* failure */
		return (ENOTSUP);

	default:
		panic("spec_blktooff type");
	}
	/* NOTREACHED */
	return (0);
}
/* Offtoblk derives logical block number from file offset */
int
spec_offtoblk(ap)
	struct vnop_offtoblk_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		daddr64_t *a_lblkno;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_lblkno = (daddr64_t)-1;	/* failure */
		return (ENOTSUP);

	case VBLK:
		printf("spec_offtoblk: not implemented for VBLK\n");
		*ap->a_lblkno = (daddr64_t)-1;	/* failure */
		return (ENOTSUP);

	default:
		panic("spec_offtoblk type");
	}
	/* NOTREACHED */
	return (0);
}