]> git.saurik.com Git - apple/xnu.git/blobdiff - bsd/vm/vnode_pager.c
xnu-2050.7.9.tar.gz
[apple/xnu.git] / bsd / vm / vnode_pager.c
index a15b6dcc4516c382eabc7014e63f3dad0cf90ec9..f86ba0148f42e2d61b22a07672f8adf17919d25f 100644 (file)
@@ -52,6 +52,7 @@
 #include <sys/mount_internal.h>        /* needs internal due to fhandle_t */
 #include <sys/ubc_internal.h>
 #include <sys/lock.h>
+#include <sys/disk.h>          /* For DKIOC calls */
 
 #include <mach/mach_types.h>
 #include <mach/memory_object_types.h>
 #include <vm/vm_protos.h>
 
 
+/*
+ * vnode_pager_throttle:
+ * If the current uthread has a low-priority I/O window open
+ * (uu_lowpri_window set), block in throttle_lowpri_io() so that
+ * pager-issued I/O honors the same throttling as ordinary file I/O.
+ * No-op otherwise.  Takes no arguments; returns nothing.
+ */
+void
+vnode_pager_throttle(void)
+{
+	struct uthread *ut;
+
+	ut = get_bsdthread_info(current_thread());
+
+	/* Only throttle if a low-priority I/O window is currently open */
+	if (ut->uu_lowpri_window)
+		throttle_lowpri_io(TRUE);
+}
+
+
+/*
+ * vnode_pager_isSSD:
+ * Report whether the mount backing 'vp' is flagged as solid-state
+ * storage, i.e. MNTK_SSD is set in its mnt_kern_flag.
+ */
+boolean_t
+vnode_pager_isSSD(vnode_t vp)
+{
+	return (vp->v_mount->mnt_kern_flag & MNTK_SSD) ? TRUE : FALSE;
+}
+
+
 uint32_t
 vnode_pager_isinuse(struct vnode *vp)
 {
@@ -137,6 +159,85 @@ vnode_pager_get_cs_blobs(
        return KERN_SUCCESS;
 }
 
+/* 
+ * vnode_trim:
+ * Used to call the DKIOCUNMAP ioctl on the underlying disk device for the specified vnode.
+ * Trims the region at offset bytes into the file, for length bytes.
+ *
+ * Care must be taken to ensure that the vnode is sufficiently reference counted at the time this
+ * function is called; no iocounts or usecounts are taken on the vnode.
+ * This function is non-idempotent in error cases;  We cannot un-discard the blocks if only some of them
+ * are successfully discarded.
+ */
+/*
+ * vnode_trim:
+ * Used to call the DKIOCUNMAP ioctl on the underlying disk device for the specified vnode.
+ * Trims the region at offset bytes into the file, for length bytes.
+ *
+ * Care must be taken to ensure that the vnode is sufficiently reference counted at the time this
+ * function is called; no iocounts or usecounts are taken on the vnode.
+ * This function is non-idempotent in error cases;  We cannot un-discard the blocks if only some of them
+ * are successfully discarded.
+ *
+ * Returns 0 on success, or the errno from VNOP_IOCTL / VNOP_BLOCKMAP on
+ * failure (despite the u_int32_t return type, this is an errno value).
+ */
+u_int32_t vnode_trim (
+		struct vnode *vp,
+		off_t offset,
+		size_t length)
+{
+	daddr64_t io_blockno;	 /* Block number corresponding to the start of the extent */
+	size_t io_bytecount;	/* Number of bytes in current extent for the specified range */
+	size_t trimmed = 0;	/* Running total of bytes successfully unmapped so far */
+	off_t current_offset = offset; 
+	size_t remaining_length = length;
+	int error = 0;
+	u_int32_t blocksize = 0;
+	struct vnode *devvp;
+	dk_extent_t extent;	/* Single device-relative extent handed to DKIOCUNMAP */
+	dk_unmap_t unmap;	/* DKIOCUNMAP argument: points at 'extent' above */
+
+
+	/* Get the underlying device vnode */
+	devvp = vp->v_mount->mnt_devvp;
+
+	/* Figure out the underlying device block size */
+	error  = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blocksize, 0, vfs_context_kernel());
+	if (error) {
+		goto trim_exit;
+	}
+
+	/* 
+	 * We may not get the entire range from offset -> offset+length in a single
+	 * extent from the blockmap call.  Keep looping/going until we are sure we've hit
+	 * the whole range or if we encounter an error.
+	 */
+	while (trimmed < length) {
+		/*
+		 * VNOP_BLOCKMAP will tell us the logical to physical block number mapping for the
+		 * specified offset.  It returns blocks in contiguous chunks, so if the logical range is 
+		 * broken into multiple extents, it must be called multiple times, increasing the offset
+		 * in each call to ensure that the entire range is covered.
+		 */
+		error = VNOP_BLOCKMAP (vp, current_offset, remaining_length, 
+				&io_blockno, &io_bytecount, NULL, VNODE_READ, NULL);
+
+		if (error) {
+			goto trim_exit;
+		}
+		/*
+		 * NOTE(review): io_blockno is not checked for -1 (a hole / unallocated
+		 * range); a hole would yield a bogus device offset below.  Presumably
+		 * callers only trim allocated ranges — confirm against callers.
+		 * NOTE(review): an io_bytecount of 0 would make this loop spin forever;
+		 * verify VNOP_BLOCKMAP cannot return a zero-length run here.
+		 */
+		/* 
+		 * We have a contiguous run.  Prepare & issue the ioctl for the device.
+		 * the DKIOCUNMAP ioctl takes offset in bytes from the start of the device.
+		 */
+		memset (&extent, 0, sizeof(dk_extent_t));
+		memset (&unmap, 0, sizeof(dk_unmap_t));
+		/* Convert the device block number to a byte offset on the device */
+		extent.offset = (uint64_t) io_blockno * (u_int64_t) blocksize;
+		extent.length = io_bytecount;
+		unmap.extents = &extent;
+		unmap.extentsCount = 1;
+		error = VNOP_IOCTL(devvp, DKIOCUNMAP, (caddr_t)&unmap, 0, vfs_context_kernel());
+
+		if (error) {
+			goto trim_exit;
+		}
+		/* Advance past the extent we just unmapped and account for it */
+		remaining_length = remaining_length - io_bytecount;
+		trimmed = trimmed + io_bytecount;
+		current_offset = current_offset + io_bytecount;
+	}
+trim_exit:
+
+	return error;
+
+}
+
 pager_return_t
 vnode_pageout(struct vnode *vp,
        upl_t                   upl,
@@ -179,15 +280,17 @@ vnode_pageout(struct vnode *vp,
                 * just go ahead and call vnop_pageout since
                 * it has already sorted out the dirty ranges
                 */
-               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
-                                     size, 1, 0, 0, 0);
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                       (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
+                       size, 1, 0, 0, 0);
 
                if ( (error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
                                               (size_t)size, flags, ctx)) )
                        result = PAGER_ERROR;
 
-               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
-                                     size, 1, 0, 0, 0);
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                       (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
+                       size, 1, 0, 0, 0);
 
                goto out;
        }
@@ -202,15 +305,17 @@ vnode_pageout(struct vnode *vp,
                         * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
                         * take any locks it needs, before effectively locking the pages into a UPL...
                         */
-                       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
-                                             size, (int)f_offset, 0, 0, 0);
+                       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+                               (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
+                               size, (int)f_offset, 0, 0, 0);
 
                        if ( (error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset,
                                                       size, flags, ctx)) ) {
                                result = PAGER_ERROR;
                        }
-                       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
-                                             size, 0, 0, 0, 0);
+                       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                               (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
+                               size, 0, 0, 0, 0);
 
                        goto out;
                }
@@ -219,9 +324,7 @@ vnode_pageout(struct vnode *vp,
                else
                        request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;
                
-               ubc_create_upl(vp, f_offset, size, &upl, &pl, request_flags);
-
-               if (upl == (upl_t)NULL) {
+               if (ubc_create_upl(vp, f_offset, size, &upl, &pl, request_flags) != KERN_SUCCESS) {
                        result    = PAGER_ERROR;
                        error_ret = EINVAL;
                        goto out;
@@ -362,8 +465,9 @@ vnode_pageout(struct vnode *vp,
                }
                xsize = num_of_pages * PAGE_SIZE;
 
-               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
-                                     xsize, (int)f_offset, 0, 0, 0);
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                       (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
+                       xsize, (int)f_offset, 0, 0, 0);
 
                if ( (error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset,
                                           xsize, flags, ctx)) ) {
@@ -371,8 +475,9 @@ vnode_pageout(struct vnode *vp,
                                error_ret = error;
                        result = PAGER_ERROR;
                }
-               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
-                                     xsize, 0, 0, 0, 0);
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                       (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
+                       xsize, 0, 0, 0, 0);
 
                f_offset += xsize;
                offset   += xsize;
@@ -455,6 +560,8 @@ vnode_pagein(
                        error = PAGER_ABSENT;
                        goto out;
                }
+               ubc_upl_range_needed(upl, upl_offset / PAGE_SIZE, 1);
+
                upl_offset = 0;
                first_pg = 0;
                
@@ -555,14 +662,23 @@ vnode_pagein(
                                               xsize, flags, vfs_context_current())) ) {
                                /*
                                 * Usually this UPL will be aborted/committed by the lower cluster layer.
-                                * In the case of decmpfs, however, we may return an error (EAGAIN) to avoid
-                                * a deadlock with another thread already inflating the file. In that case,
-                                * we must take care of our UPL at this layer itself.
+                                *
+                                * a)   In the case of decmpfs, however, we may return an error (EAGAIN) to avoid
+                                *      a deadlock with another thread already inflating the file. 
+                                *
+                                * b)   In the case of content protection, EPERM is a valid error and we should respect it.
+                                *
+                                * In those cases, we must take care of our UPL at this layer itself.
                                 */
                                if (must_commit) {
                                        if(error == EAGAIN) {
                                                ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
                                        }
+#if CONFIG_PROTECT
+                                       if(error == EPERM) {
+                                               ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
+                                       }
+#endif
                                }
                                result = PAGER_ERROR;
                                error  = PAGER_ERROR;