/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.14 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <dev/disk.h>
#include <miscfs/specfs/specdev.h>
#include <vfs/vfs_support.h>

#include <sys/kdebug.h>

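/*
 * Hash chains of active special vnodes, keyed by device number; the chains
 * are maintained by the device-alias code in the VFS layer (checkalias()).
 */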
struct vnode *speclisth[SPECHSZ];

/* symbolic sleep message strings for devices */
char devopn[] = "devopn";
char devio[] = "devio";
char devwait[] = "devwait";
char devin[] = "devin";
char devout[] = "devout";
char devioc[] = "devioc";
char devcls[] = "devcls";

#define VOPFUNC int (*)(void *)

int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)err_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)err_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)spec_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)spec_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)spec_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)spec_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)spec_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)spec_read },			/* read */
	{ &vop_write_desc, (VOPFUNC)spec_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)nop_lease },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)nop_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)err_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)spec_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)err_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)err_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)err_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)err_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)err_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)err_rmdir },		/* rmdir */
	{ &vop_symlink_desc, (VOPFUNC)err_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)err_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)err_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)err_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)nop_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)nop_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)nop_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)nop_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)spec_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)spec_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)nop_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)err_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)err_valloc },		/* valloc */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)nop_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)nop_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)spec_bwrite },		/* bwrite */
	{ &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vop_pagein_desc, (VOPFUNC)err_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)err_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)spec_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)spec_offtoblk },		/* offtoblk */
	{ &vop_cmap_desc, (VOPFUNC)spec_cmap },			/* cmap */
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
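/*
 * The table above is published through spec_vnodeop_opv_desc; at boot the
 * generic VFS initialization (normally vfs_opv_init()) walks the registered
 * vnodeopv_desc structures and fills in spec_vnodeop_p with the dispatch
 * vector built from spec_vnodeop_entries.
 */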

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

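/*
 * Determine the natural I/O block size for a device vnode.  If the block
 * driver provides a size routine (d_psize), its result becomes v_specsize;
 * otherwise, or if the routine reports a non-positive value, fall back to
 * DEV_BSIZE.
 */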
void
set_blocksize(struct vnode *vp, dev_t dev)
{
	int (*size)();
	int rsize;

	if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
		rsize = (*size)(dev);
		if (rsize <= 0) /* did size fail? */
			vp->v_specsize = DEV_BSIZE;
		else
			vp->v_specsize = rsize;
	}
	else
		vp->v_specsize = DEV_BSIZE;
}

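/*
 * Re-derive v_specsize for a block device vnode; character-device vnodes and
 * out-of-range block majors are left untouched.
 */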
void
set_fsblocksize(struct vnode *vp)
{

	if (vp->v_type == VBLK) {
		dev_t dev = (dev_t)vp->v_rdev;
		int maj = major(dev);

		if ((u_int)maj >= nblkdev)
			return;

		set_blocksize(vp, dev);
	}

}


/*
 * Open a special file.
 */
/* ARGSUSED */
spec_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct proc *p = ap->a_p;
	struct vnode *bvp, *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && isdisk(dev, VCHR))
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
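		/*
		 * The vnode lock is dropped across the driver's open entry
		 * point: a character-device open may block for an arbitrary
		 * time (a tty waiting for carrier, for instance), and holding
		 * the vnode lock across that wait could stall other accesses
		 * to this vnode.
		 */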
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if (error = vfs_mountedon(vp))
			return (error);
		error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
		if (!error) {
			set_blocksize(vp, dev);
		}
		return(error);
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale;
	int devBlockSize=0;
	int n, on, majordev, (*ioctl)();
	int error = 0;
	dev_t dev;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc())
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);

		dev = vp->v_rdev;

		devBlockSize = vp->v_specsize;

		if (devBlockSize > PAGE_SIZE)
			return (EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		bsize = bscale * devBlockSize;

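		/*
		 * Block-device reads go through the buffer cache in
		 * page-sized clusters.  Illustrative arithmetic, assuming a
		 * 512-byte device block and 4K pages: bscale = 8 and
		 * bsize = 4096; an offset of 6144 gives on = 2048 and
		 * bn = (12 & ~7) = 8, so the cluster starting at device
		 * block 8 is read and the copy to the user starts 2048 bytes
		 * into it.  If the previous read ended one cluster back
		 * (v_lastr + bscale == bn), breadn() is used to start
		 * read-ahead of the next cluster while this one is copied.
		 */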
		do {
			on = uio->uio_offset % bsize;

			bn = (uio->uio_offset / devBlockSize) &~ (bscale - 1);

			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);

			vp->v_lastr = bn;
			n = bsize - bp->b_resid;
			if ((on > n) || error) {
				if (!error)
					error = EINVAL;
				brelse(bp);
				return (error);
			}
			n = min((unsigned)(n - on), uio->uio_resid);

			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask, bscale;
	register int io_sync;
	register int io_size;
	int devBlockSize=0;
	register int n, on;
	int error = 0;
	dev_t dev;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc())
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);

		io_sync = (ap->a_ioflag & IO_SYNC);
		io_size = uio->uio_resid;

		dev = (vp->v_rdev);

		devBlockSize = vp->v_specsize;
		if (devBlockSize > PAGE_SIZE)
			return(EINVAL);

		bscale = PAGE_SIZE / devBlockSize;
		blkmask = bscale - 1;
		bsize = bscale * devBlockSize;

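		/*
		 * As in spec_read(), writes are done in page-sized clusters.
		 * A write that covers a whole cluster can simply claim a
		 * buffer with getblk(); a partial write must first bread()
		 * the cluster so the untouched bytes are preserved
		 * (read-modify-write).  IO_SYNC forces each buffer out with
		 * bwrite(); otherwise completed clusters are pushed
		 * asynchronously (bawrite) and partial ones are left as
		 * delayed writes (bdwrite).
		 */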
		do {
			bn = (uio->uio_offset / devBlockSize) &~ blkmask;
			on = uio->uio_offset % bsize;

			n = min((unsigned)(bsize - on), uio->uio_resid);

			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);

			if (error) {
				brelse(bp);
				return (error);
			}
			n = min(n, bsize - bp->b_resid);

			error = uiomove((char *)bp->b_data + on, n, uio);

			bp->b_flags |= B_AGE;

			if (io_sync)
				bwrite(bp);
			else {
				if ((n + on) == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	dev_t dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
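		/*
		 * Command 0 with B_TAPE as the argument is a probe: it asks
		 * whether the underlying block device is a tape (answered
		 * with 0) or not (answered with 1) without calling into the
		 * driver.
		 */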
		if (ap->a_command == 0 && (int)ap->a_data == B_TAPE)
			if (bdevsw[major(dev)].d_type == D_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(ap)
	struct vop_select_args /* {
		struct vnode *a_vp;
		int a_which;
		int a_fflags;
		struct ucred *a_cred;
		void * a_wql;
		struct proc *a_p;
	} */ *ap;
{
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (1); /* XXX */

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, ap->a_p);
	}
}
/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		bawrite(bp);
		goto loop;
	}
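	/*
	 * The loop above restarts from the top after each bawrite() because
	 * the dirty list may have changed while the buffer was being pushed.
	 * For MNT_WAIT callers, now drain any writes still in flight:
	 * v_numoutput counts them, and the sleep on it below is woken from
	 * the I/O completion path as writes finish.
	 */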
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "spec_fsync", 0);
		}
#if DIAGNOSTIC
		if (vp->v_dirtyblkhd.lh_first) {
			vprint("spec_fsync: dirty", vp);
			splx(s);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}

/*
 * Just call the device strategy routine
 */
spec_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp;

	bp = ap->a_bp;

	if (kdebug_enable) {
		int code = 0;

		if (bp->b_flags & B_READ)
			code |= DKIO_READ;
		if (bp->b_flags & B_ASYNC)
			code |= DKIO_ASYNC;

		if (bp->b_flags & B_META)
			code |= DKIO_META;
		else if (bp->b_flags & (B_PGIN | B_PAGEOUT))
			code |= DKIO_PAGING;

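		/*
		 * The composed DKIO_* code, together with the buffer
		 * pointer, device, block number and byte count, is emitted
		 * as a kdebug trace point so disk I/O can be classified
		 * (read vs. write, async, metadata, paging) by tracing
		 * tools (fs_usage, for example).
		 */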
		KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
		    bp, bp->b_dev, bp->b_blkno, bp->b_bcount, 0);
	}
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * Trivial bmap for a special vnode: the vnode itself is returned as the
 * backing object, the logical block number is converted to device blocks by
 * scaling by the number of device blocks per page, and the largest supported
 * contiguous run is reported.
 */
spec_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * (PAGE_SIZE / ap->a_vp->v_specsize);
	if (ap->a_runp != NULL)
		*ap->a_runp = (MAXPHYSIO / PAGE_SIZE) - 1;
	return (0);
}

/*
 * Cluster mapping is not supported on special vnodes.
 */
spec_cmap(ap)
	struct vop_cmap_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		size_t a_size;
		daddr_t *a_bpn;
		size_t *a_run;
		void *a_poff;
	} */ *ap;
{
	return (EOPNOTSUPP);
}


/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal. In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			ap->a_p->p_session->s_ttyvp = NULL;
			vrele(vp);
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
#ifdef DEVFS_IMPLEMENTS_LOCKING
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
#else /* DEVFS_IMPLEMENTS_LOCKING */
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);

		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
#endif /* DEVFS_IMPLEMENTS_LOCKING */
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
spec_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

int
spec_devblocksize(ap)
	struct vop_devblocksize_args /* {
		struct vnode *a_vp;
		int *a_retval;
	} */ *ap;
{
	*ap->a_retval = (ap->a_vp->v_specsize);
	return (0);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}

/* Blktooff derives file offset from logical block number */
int
spec_blktooff(ap)
	struct vop_blktooff_args /* {
		struct vnode *a_vp;
		daddr_t a_lblkno;
		off_t *a_offset;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_offset = (off_t)-1; /* failure */
		return (EOPNOTSUPP);

	case VBLK:
		printf("spec_blktooff: not implemented for VBLK\n");
		*ap->a_offset = (off_t)-1; /* failure */
		return (EOPNOTSUPP);

	default:
		panic("spec_blktooff type");
	}
	/* NOTREACHED */
}

/* Offtoblk derives logical block number from file offset */
int
spec_offtoblk(ap)
	struct vop_offtoblk_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		daddr_t *a_lblkno;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VCHR:
		*ap->a_lblkno = (daddr_t)-1; /* failure */
		return (EOPNOTSUPP);

	case VBLK:
		printf("spec_offtoblk: not implemented for VBLK\n");
		*ap->a_lblkno = (daddr_t)-1; /* failure */
		return (EOPNOTSUPP);

	default:
		panic("spec_offtoblk type");
	}
	/* NOTREACHED */
}