git.saurik.com Git - apple/xnu.git/blobdiff - bsd/miscfs/specfs/spec_vnops.c (xnu-517.9.4)

diff --git a/bsd/miscfs/specfs/spec_vnops.c b/bsd/miscfs/specfs/spec_vnops.c
index d5efd260bfc8a3473aca78e8f27ddef72ef01588..314464b1967e22fcf2f69267c80070f17ee19b15 100644
--- a/bsd/miscfs/specfs/spec_vnops.c
+++ b/bsd/miscfs/specfs/spec_vnops.c
@@ -1,24 +1,21 @@
 /*
- * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -72,8 +69,9 @@
 #include <sys/ioctl.h>
 #include <sys/file.h>
 #include <sys/malloc.h>
-#include <dev/disk.h>
+#include <sys/disk.h>
 #include <miscfs/specfs/specdev.h>
+#include <miscfs/specfs/lockf.h>
 #include <vfs/vfs_support.h>
 
 #include <sys/kdebug.h>
@@ -129,7 +127,7 @@ struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
        { &vop_print_desc, (VOPFUNC)spec_print },               /* print */
        { &vop_islocked_desc, (VOPFUNC)nop_islocked },          /* islocked */
        { &vop_pathconf_desc, (VOPFUNC)spec_pathconf },         /* pathconf */
-       { &vop_advlock_desc, (VOPFUNC)err_advlock },            /* advlock */
+       { &vop_advlock_desc, (VOPFUNC)spec_advlock },           /* advlock */
        { &vop_blkatoff_desc, (VOPFUNC)err_blkatoff },          /* blkatoff */
        { &vop_valloc_desc, (VOPFUNC)err_valloc },              /* valloc */
        { &vop_vfree_desc, (VOPFUNC)err_vfree },                /* vfree */
@@ -275,7 +273,30 @@ spec_open(ap)
                        return (error);
                error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
                if (!error) {
+                   u_int64_t blkcnt;
+                   u_int32_t blksize;
+
                    set_blocksize(vp, dev);
+
+                   /*
+                    * Cache the size in bytes of the block device for later
+                    * use by spec_write().
+                    */
+                   vp->v_specdevsize = (u_int64_t)0;   /* Default: Can't get */
+                   if (!VOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, NOCRED, p)) {
+                       /* Switch to 512 byte sectors (temporarily) */
+                       u_int32_t size512 = 512;
+
+                       if (!VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, NOCRED, p)) {
+                           /* Get the number of 512 byte physical blocks. */
+                           if (!VOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, NOCRED, p)) {
+                               vp->v_specdevsize = blkcnt * (u_int64_t)size512;
+                           }
+                       }
+                       /* If it doesn't set back, we can't recover */
+                       if (VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, NOCRED, p))
+                           error = ENXIO;
+                   }
                }
                return(error);
        }
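
The spec_open() change above caches the device capacity in v_specdevsize: it reads the current block size, temporarily switches the device to 512-byte sectors so the block count is obtained in a known unit, multiplies, and then restores the original block size (a failed restore is reported as ENXIO). For orientation, a minimal user-space sketch of the same capacity query with the <sys/disk.h> ioctls follows; the device path and the program itself are illustrative assumptions, not part of this change.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
main(void)
{
        uint32_t blksize = 0;   /* bytes per block (DKIOCGETBLOCKSIZE) */
        uint64_t blkcnt = 0;    /* number of blocks (DKIOCGETBLOCKCOUNT) */
        int fd = open("/dev/disk1", O_RDONLY);  /* example device only */

        if (fd < 0)
                return 1;
        if (ioctl(fd, DKIOCGETBLOCKSIZE, &blksize) == 0 &&
            ioctl(fd, DKIOCGETBLOCKCOUNT, &blkcnt) == 0)
                printf("capacity: %llu bytes\n",
                    (unsigned long long)blkcnt * blksize);
        close(fd);
        return 0;
}
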
@@ -439,11 +460,35 @@ spec_write(ap)
 
                        n = min((unsigned)(bsize - on), uio->uio_resid);
 
+                       /*
+                        * Use getblk() as an optimization IFF:
+                        *
+                        * 1)   We are reading exactly a block on a block
+                        *      aligned boundary
+                        * 2)   We know the size of the device from spec_open
+                        * 3)   The read doesn't span the end of the device
+                        *
+                        * Otherwise, we fall back on bread().
+                        */
+                       if (n == bsize &&
+                           vp->v_specdevsize != (u_int64_t)0 &&
+                           (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
+                           /* reduce the size of the read to what is there */
+                           n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
+                       }
+
                        if (n == bsize)
                                bp = getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
                        else
                                error = bread(vp, bn, bsize, NOCRED, &bp);
 
+                       /* Translate downstream error for upstream, if needed */
+                       if (!error) {
+                               error = bp->b_error;
+                               if (!error && (bp->b_flags & B_ERROR) != 0) {
+                                       error = EIO;
+                               }
+                       }
                        if (error) {
                                brelse(bp);
                                return (error);
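
Two things are added in the spec_write() hunk above: the getblk() optimization is taken only when the transfer is exactly one block, the device size is known, and the block does not run past the end of the device (otherwise it falls back on bread()), and any error a driver left on the buffer is folded into an errno before the buffer is used. The sketch below restates that second idiom in isolation; the struct, the flag value, and the helper name are simplified stand-ins, not kernel definitions.

#include <errno.h>

struct fake_buf {
        int     b_error;        /* errno reported by the driver, 0 if none */
        int     b_flags;        /* completion flags */
};
#define B_ERROR 0x00080000      /* stand-in for the kernel's B_ERROR bit */

/* Prefer an explicit b_error; fall back to EIO when only B_ERROR is set. */
static int
buf_errno(const struct fake_buf *bp)
{
        if (bp->b_error != 0)
                return bp->b_error;
        if (bp->b_flags & B_ERROR)
                return EIO;
        return 0;
}
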
@@ -595,6 +640,7 @@ spec_strategy(ap)
        } */ *ap;
 {
         struct buf *bp;
+       extern int hard_throttle_on_root;
 
         bp = ap->a_bp;
 
@@ -612,12 +658,109 @@ spec_strategy(ap)
                 code |= DKIO_PAGING;
 
             KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
-                                bp, bp->b_dev, bp->b_blkno, bp->b_bcount, 0);
+                               (unsigned int)bp, bp->b_dev, bp->b_blkno, bp->b_bcount, 0);
         }
+       if ((bp->b_flags & B_PGIN) && (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV))
+              hard_throttle_on_root = 1;
+
         (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
         return (0);
 }
 
+/*
+ * Advisory record locking support
+ */
+int
+spec_advlock(ap)
+       struct vop_advlock_args /* {
+               struct vnode *a_vp;
+               caddr_t  a_id;
+               int  a_op;
+               struct flock *a_fl;
+               int  a_flags;
+       } */ *ap;
+{
+       register struct flock *fl = ap->a_fl;
+       register struct lockf *lock;
+       off_t start, end;
+       int error;
+
+       /*
+        * Avoid the common case of unlocking when inode has no locks.
+        */
+       if (ap->a_vp->v_specinfo->si_lockf == (struct lockf *)0) {
+               if (ap->a_op != F_SETLK) {
+                       fl->l_type = F_UNLCK;
+                       return (0);
+               }
+       }
+       /*
+        * Convert the flock structure into a start and end.
+        */
+       switch (fl->l_whence) {
+
+       case SEEK_SET:
+       case SEEK_CUR:
+               /*
+                * Caller is responsible for adding any necessary offset
+                * when SEEK_CUR is used.
+                */
+               start = fl->l_start;
+               break;
+
+       case SEEK_END:
+               start = ap->a_vp->v_specinfo->si_devsize + fl->l_start;
+               break;
+
+       default:
+               return (EINVAL);
+       }
+       if (fl->l_len == 0)
+               end = -1;
+       else if (fl->l_len > 0)
+               end = start + fl->l_len - 1;
+       else { /* l_len is negative */
+               end = start - 1;
+               start += fl->l_len;
+       }
+       if (start < 0)
+               return (EINVAL);
+       /*
+        * Create the lockf structure
+        */
+       MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
+       lock->lf_start = start;
+       lock->lf_end = end;
+       lock->lf_id = ap->a_id;
+       lock->lf_specinfo = ap->a_vp->v_specinfo;
+       lock->lf_type = fl->l_type;
+       lock->lf_next = (struct lockf *)0;
+       TAILQ_INIT(&lock->lf_blkhd);
+       lock->lf_flags = ap->a_flags;
+       /*
+        * Do the requested operation.
+        */
+       switch(ap->a_op) {
+       case F_SETLK:
+               return (spec_lf_setlock(lock));
+
+       case F_UNLCK:
+               error = spec_lf_clearlock(lock);
+               FREE(lock, M_LOCKF);
+               return (error);
+
+       case F_GETLK:
+               error = spec_lf_getlock(lock, fl);
+               FREE(lock, M_LOCKF);
+               return (error);
+       
+       default:
+               _FREE(lock, M_LOCKF);
+               return (EINVAL);
+       }
+       /* NOTREACHED */
+}
+
 /*
  * This is a noop, simply returning what one has been given.
  */
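
With spec_advlock wired into the vnode operation table above, advisory record locks taken through fcntl(2) now work on device special files, with the lock list kept per device in si_lockf. A minimal user-space sketch of taking such a lock follows; the function name and device path are examples, not part of this change.

#include <fcntl.h>
#include <unistd.h>

/* Take a non-blocking advisory write lock covering the whole device. */
int
lock_whole_device(const char *path)     /* e.g. "/dev/disk1" */
{
        struct flock fl = {
                .l_type = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start = 0,
                .l_len = 0,             /* 0 means "to the end" */
        };
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return -1;
        if (fcntl(fd, F_SETLK, &fl) == -1) {    /* F_SETLKW would block instead */
                close(fd);
                return -1;
        }
        return fd;                      /* closing fd releases the lock */
}
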