]> git.saurik.com Git - apple/xnu.git/blobdiff - bsd/vm/vnode_pager.c
xnu-1504.7.4.tar.gz
[apple/xnu.git] / bsd / vm / vnode_pager.c
index f6969d1bc3bbad939fe6175325666d28d039de65..a15b6dcc4516c382eabc7014e63f3dad0cf90ec9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 
 #include <mach/mach_types.h>
 #include <mach/memory_object_types.h>
+#include <mach/memory_object_control.h>
+#include <mach/vm_map.h>
+#include <mach/mach_vm.h>
+#include <mach/upl.h>
+#include <mach/sdt.h>
 
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>
 
 #include <vm/vm_protos.h>
 
-unsigned int vp_pagein=0;
-unsigned int vp_pgodirty=0;
-unsigned int vp_pgoclean=0;
-unsigned int dp_pgouts=0;      /* Default pager pageouts */
-unsigned int dp_pgins=0;       /* Default pager pageins */
+
+uint32_t
+vnode_pager_isinuse(struct vnode *vp)
+{
+       if (vp->v_usecount > vp->v_kusecount)
+               return (1);
+       return (0);
+}
+
+uint32_t
+vnode_pager_return_hard_throttle_limit(struct vnode *vp, uint32_t *limit, uint32_t hard_throttle)
+{
+       return(cluster_hard_throttle_limit(vp, limit, hard_throttle));
+}
 
 vm_object_offset_t
 vnode_pager_get_filesize(struct vnode *vp)
@@ -88,16 +102,50 @@ vnode_pager_get_filesize(struct vnode *vp)
        return (vm_object_offset_t) ubc_getsize(vp);
 }
 
+kern_return_t
+vnode_pager_get_pathname(
+       struct vnode    *vp,
+       char            *pathname,
+       vm_size_t       *length_p)
+{
+       int     error, len;
+
+       len = (int) *length_p;
+       error = vn_getpath(vp, pathname, &len);
+       if (error != 0) {
+               return KERN_FAILURE;
+       }
+       *length_p = (vm_size_t) len;
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+vnode_pager_get_filename(
+       struct vnode    *vp,
+       const char      **filename)
+{
+       *filename = vp->v_name;
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+vnode_pager_get_cs_blobs(
+       struct vnode    *vp,
+       void            **blobs)
+{
+       *blobs = ubc_get_cs_blobs(vp);
+       return KERN_SUCCESS;
+}
+
/*
 * vnode_pageout() -- push the pages described by 'upl'/'f_offset'/'size'
 * out to the filesystem via VNOP_PAGEOUT.
 *
 * NOTE(review): this span is a unified-diff fragment; the lines git
 * elided between the @@ hunks are not visible here, so the comments
 * added below describe only what the visible code demonstrates.  The
 * code (including diff markers) is otherwise left byte-identical.
 */
 pager_return_t
 vnode_pageout(struct vnode *vp,
 	upl_t			upl,
-	vm_offset_t		upl_offset,
+	upl_offset_t		upl_offset,
 	vm_object_offset_t	f_offset,
-	vm_size_t		size,
+	upl_size_t		size,
 	int			flags,
 	int			*errorp)
 {
-	struct proc	*p = current_proc();
 	int		result = PAGER_SUCCESS;
 	int		error = 0;
 	int		error_ret = 0;
@@ -105,12 +153,9 @@ vnode_pageout(struct vnode *vp,
 	int isize;
 	int pg_index;
 	int base_index;
-	int offset;
+	upl_offset_t offset;
 	upl_page_info_t *pl;
-	struct vfs_context context;
-
-	context.vc_proc = p;
-	context.vc_ucred = kauth_cred_get();
+	vfs_context_t ctx = vfs_context_current();	/* pager context */
 
 	isize = (int)size;
 
@@ -119,9 +164,8 @@ vnode_pageout(struct vnode *vp,
 		error_ret = EINVAL;
 		goto out;
 	}
-	UBCINFOCHECK("vnode_pageout", vp);
 
	/* a vnode with no UBC info cannot be paged out */
-	if (UBCINVALID(vp)) {
+	if (UBCINFOEXISTS(vp) == 0) {
 		result    = PAGER_ERROR;
 		error_ret = EINVAL;
 
@@ -135,13 +179,11 @@ vnode_pageout(struct vnode *vp,
 		 * just go ahead and call vnop_pageout since
 		 * it has already sorted out the dirty ranges
 		 */
-		dp_pgouts++;
-
 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
 				      size, 1, 0, 0, 0);
 
 		if ( (error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
-					       (size_t)size, flags, &context)) )
+					       (size_t)size, flags, ctx)) )
 			result = PAGER_ERROR;
 
 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
@@ -149,6 +191,45 @@ vnode_pageout(struct vnode *vp,
 
 		goto out;
 	}
	/*
	 * no UPL was handed in: either defer UPL creation to a
	 * PAGEOUTV2-capable filesystem, or create one here covering
	 * the requested range.
	 */
+	if (upl == NULL) {
+		int			request_flags;
+
+		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEOUTV2) {
+			/*
+			 * filesystem has requested the new form of VNOP_PAGEOUT for file
+			 * backed objects... we will not grab the UPL before calling VNOP_PAGEOUT...
+			 * it is the filesystem's responsibility to grab the range we're denoting
+			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
+			 * take any locks it needs, before effectively locking the pages into a UPL...
+			 */
+			KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
+					      size, (int)f_offset, 0, 0, 0);
+
+			if ( (error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset,
+						       size, flags, ctx)) ) {
+				result = PAGER_ERROR;
+			}
+			KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
+					      size, 0, 0, 0, 0);
+
+			goto out;
+		}
+		if (flags & UPL_MSYNC)
+			request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
+		else
+			request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;
+		
+		ubc_create_upl(vp, f_offset, size, &upl, &pl, request_flags);
+
+		if (upl == (upl_t)NULL) {
+			result    = PAGER_ERROR;
+			error_ret = EINVAL;
+			goto out;
+		}
+		upl_offset = 0;
+	} else 
+		pl = ubc_upl_pageinfo(upl);
+
 	/*
 	 * we come here for pageouts to 'real' files and
 	 * for msyncs...  the upl may not contain any
@@ -156,8 +237,6 @@ vnode_pageout(struct vnode *vp,
 	 * through it and find the 'runs' of dirty pages
 	 * to call VNOP_PAGEOUT on...
 	 */
-	pl = ubc_upl_pageinfo(upl);
-
 	if (ubc_getsize(vp) == 0) {
 		/*
 		 * if the file has been effectively deleted, then
@@ -212,7 +291,7 @@ vnode_pageout(struct vnode *vp,
 			goto out;
 		}
 	}
	/* size of the run of interesting pages, relative to base_index */
-	isize = (pg_index + 1) * PAGE_SIZE;
+	isize = ((pg_index + 1) - base_index) * PAGE_SIZE;
 
 	offset = upl_offset;
 	pg_index = base_index;
@@ -227,8 +306,9 @@ vnode_pageout(struct vnode *vp,
 			 * to get back empty slots in the UPL
 			 * just skip over them
 			 */
-			offset += PAGE_SIZE;
-			isize  -= PAGE_SIZE;
+			f_offset += PAGE_SIZE;
+			offset   += PAGE_SIZE;
+			isize    -= PAGE_SIZE;
 			pg_index++;
 
 			continue;
@@ -244,16 +324,14 @@ vnode_pageout(struct vnode *vp,
 			 * Note we must not sleep here if the buffer is busy - that is
 			 * a lock inversion which causes deadlock.
 			 */
-			vp_pgoclean++;			
-
 #if NFSCLIENT
 			if (vp->v_tag == VT_NFS)
 				/* check with nfs if page is OK to drop */
-				error = nfs_buf_page_inval(vp, (off_t)(f_offset + offset));
+				error = nfs_buf_page_inval(vp, (off_t)f_offset);
 			else
 #endif
 			{
-				blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));
+				blkno = ubc_offtoblk(vp, (off_t)f_offset);
 				error = buf_invalblkno(vp, blkno, 0);
 			}
 			if (error) {
@@ -266,14 +344,13 @@ vnode_pageout(struct vnode *vp,
 			} else if ( !(flags & UPL_NOCOMMIT)) {
 				ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
 			}
-			offset += PAGE_SIZE;
-			isize  -= PAGE_SIZE;
+			f_offset += PAGE_SIZE;
+			offset   += PAGE_SIZE;
+			isize    -= PAGE_SIZE;
 			pg_index++;
 
 			continue;
 		}
-		vp_pgodirty++;
-
		/* coalesce the run of consecutive dirty pages starting here */
 		num_of_pages = 1;
 		xsize = isize - PAGE_SIZE;
 
@@ -286,11 +363,10 @@ vnode_pageout(struct vnode *vp,
 		xsize = num_of_pages * PAGE_SIZE;
 
 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
-				      xsize, (int)(f_offset + offset), 0, 0, 0);
+				      xsize, (int)f_offset, 0, 0, 0);
 
-		if ( (error = VNOP_PAGEOUT(vp, upl, (vm_offset_t)offset,
-					(off_t)(f_offset + offset), xsize,
-					   flags, &context)) ) {
+		if ( (error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset,
+					   xsize, flags, ctx)) ) {
			if (error_ret == 0)
 				error_ret = error;
 			result = PAGER_ERROR;
@@ -298,8 +374,9 @@ vnode_pageout(struct vnode *vp,
 		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
 				      xsize, 0, 0, 0, 0);
 
-		offset += xsize;
-		isize  -= xsize;
+		f_offset += xsize;
+		offset   += xsize;
+		isize    -= xsize;
 		pg_index += num_of_pages;
 	}
out:
@@ -310,19 +387,16 @@ out:
 }
 
 
-void IOSleep(int);
-
/*
 * vnode_pagein() -- bring absent pages in the given range into memory
 * via VNOP_PAGEIN, skipping pages that are already valid.
 *
 * NOTE(review): this span is a unified-diff fragment; lines elided
 * between the @@ hunks are not visible here, so the comments added
 * below describe only what the visible code demonstrates.  The code
 * (including diff markers) is otherwise left byte-identical.
 */
 pager_return_t
 vnode_pagein(
 	struct vnode		*vp,
 	upl_t			upl,
-	vm_offset_t		upl_offset,
+	upl_offset_t		upl_offset,
 	vm_object_offset_t	f_offset,
-	vm_size_t		size,
+	upl_size_t		size,
 	int			flags,
 	int			*errorp)
 {
-        struct proc     *p = current_proc();
         struct uthread *ut;
         upl_page_info_t *pl;
 	int		result = PAGER_SUCCESS;
@@ -332,26 +406,49 @@ vnode_pagein(
         int             last_pg;
 	int             first_pg;
         int             xsize;
-	int             abort_needed = 1;
+	int             must_commit = 1;
 
	/* honor the caller's request to keep the UPL uncommitted */
+	if (flags & UPL_NOCOMMIT)
+		must_commit = 0;
 
-	UBCINFOCHECK("vnode_pagein", vp);
-
-	if (UBCINVALID(vp)) {
+	if (UBCINFOEXISTS(vp) == 0) {
 		result = PAGER_ERROR;
 		error  = PAGER_ERROR;
-		if (upl && !(flags & UPL_NOCOMMIT)) {
+
+		if (upl && must_commit)
 			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
-		}
+
 		goto out;
 	}
 	if (upl == (upl_t)NULL) {
-		if (size > (MAX_UPL_TRANSFER * PAGE_SIZE)) {
+		flags &= ~UPL_NOCOMMIT;
+
+		if (size > (MAX_UPL_SIZE * PAGE_SIZE)) {
 			result = PAGER_ERROR;
 			error  = PAGER_ERROR;
 			goto out;
 		}
-		ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
+		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEINV2) {
+			/*
+			 * filesystem has requested the new form of VNOP_PAGEIN for file
+			 * backed objects... we will not grab the UPL before calling VNOP_PAGEIN...
+			 * it is the filesystem's responsibility to grab the range we're denoting
+			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
+			 * take any locks it needs, before effectively locking the pages into a UPL...
+			 * so we pass a NULL into the filesystem instead of a UPL pointer... the 'upl_offset'
+			 * is used to identify the "must have" page in the extent... the filesystem is free
+			 * to clip the extent to better fit the underlying FS blocksize if it desires as 
+			 * long as it continues to include the "must have" page... 'f_offset' + 'upl_offset'
+			 * identifies that page
+			 */
+			if ( (error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset,
+						  size, flags, vfs_context_current())) ) {
+				result = PAGER_ERROR;
+				error  = PAGER_ERROR;
+			}
+			goto out;
+		}
+		ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT);
 
 		if (upl == (upl_t)NULL) {
 			result =  PAGER_ABSENT;
@@ -359,21 +456,20 @@ vnode_pagein(
 			goto out;
 		}
 		upl_offset = 0;
+		first_pg = 0;
+		
 		/*
 		 * if we get here, we've created the upl and
 		 * are responsible for committing/aborting it
 		 * regardless of what the caller has passed in
 		 */
-		flags &= ~UPL_NOCOMMIT;
-		
-		vp_pagein++;
+		must_commit = 1;
 	} else {
 		pl = ubc_upl_pageinfo(upl);
-
-		dp_pgins++;
+		first_pg = upl_offset / PAGE_SIZE;
 	}
 	pages_in_upl = size / PAGE_SIZE;
-	first_pg     = upl_offset / PAGE_SIZE;
+	DTRACE_VM2(pgpgin, int, pages_in_upl, (uint64_t *), NULL);
 
 	/*
 	 * before we start marching forward, we must make sure we end on 
@@ -383,30 +479,31 @@ vnode_pagein(
 	for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
 		if (upl_page_present(pl, last_pg))
 			break;
+		if (last_pg == first_pg) {
+			/*
+			 * empty UPL, no pages are present
+			 */
+			if (must_commit)
+				ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
+			goto out;
+		}
 	}
 	pages_in_upl = last_pg + 1;
+	last_pg = first_pg;
 
-	for (last_pg = first_pg; last_pg < pages_in_upl;) {
+	while (last_pg < pages_in_upl) {
 		/*
-		 * scan the upl looking for the next
-		 * page that is present.... if all of the 
-		 * pages are absent, we're done
+		 * skip over missing pages...
 		 */
-		for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
+		for ( ; last_pg < pages_in_upl; last_pg++) {
 			if (upl_page_present(pl, last_pg))
 				break;
 		}
-		if (last_pg == pages_in_upl)
-			break;
-
 		/*
-		 * if we get here, we've sitting on a page 
-		 * that is present... we want to skip over
-		 * any range of 'valid' pages... if this takes
-		 * us to the end of the request, than we're done
+		 * skip over 'valid' pages... we don't want to issue I/O for these
 		 */
 		for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
-			if (!upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
+			if (!upl_valid_page(pl, last_pg))
 				break;
 		}
 		if (last_pg > start_pg) {
@@ -418,22 +515,26 @@ vnode_pagein(
 			 */
 			xsize = (last_pg - start_pg) * PAGE_SIZE;
 
-			if (!(flags & UPL_NOCOMMIT))
+			if (must_commit)
 				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
-
-			abort_needed = 0;
 		}
 		if (last_pg == pages_in_upl)
+			/*
+			 * we're done... all pages that were present
+			 * have either had I/O issued on them or 
+			 * were aborted unchanged...
+			 */
 			break;
 
-		if (!upl_page_present(pl, last_pg))
+		if (!upl_page_present(pl, last_pg)) {
 			/*
-			 * if we found a range of valid pages 
-			 * terminated by a non-present page
-			 * than start over
+			 * we found a range of valid pages 
+			 * terminated by a missing page...
+			 * bump index to the next page and continue on
 			 */
+			last_pg++;
 			continue;
-
+		}
 		/*
 		 * scan from the found invalid page looking for a valid
 		 * or non-present page before the end of the upl is reached, if we
@@ -446,41 +547,44 @@ vnode_pagein(
 		}
 		if (last_pg > start_pg) {
 			int xoff;
-			struct vfs_context context;
-
-			context.vc_proc = p;
-			context.vc_ucred = kauth_cred_get();
 			xsize = (last_pg - start_pg) * PAGE_SIZE;
 			xoff  = start_pg * PAGE_SIZE;
 
-			if ( (error = VNOP_PAGEIN(vp, upl, (vm_offset_t) xoff,
+			if ( (error = VNOP_PAGEIN(vp, upl, (upl_offset_t) xoff,
 					       (off_t)f_offset + xoff,
-					       xsize, flags, &context)) ) {
+					       xsize, flags, vfs_context_current())) ) {
+				/*
+				 * Usually this UPL will be aborted/committed by the lower cluster layer.
+				 * In the case of decmpfs, however, we may return an error (EAGAIN) to avoid
+				 * a deadlock with another thread already inflating the file. In that case,
+				 * we must take care of our UPL at this layer itself.
+				 */
+				if (must_commit) {
+					if(error == EAGAIN) {
+						ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
+					}
+				}
 				result = PAGER_ERROR;
 				error  = PAGER_ERROR;
 
 			}
-			abort_needed = 0;
 		}
         }
-	if (!(flags & UPL_NOCOMMIT) && abort_needed)
-		ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
out:
 	if (errorp)
 		*errorp = result;
 
 	ut = get_bsdthread_info(current_thread());
 
-	if (ut->uu_lowpri_delay) {
+	if (ut->uu_lowpri_window) {
 		/*
 		 * task is marked as a low priority I/O type
-		 * and the I/O we issued while in this system call
+		 * and the I/O we issued while in this page fault
 		 * collided with normal I/O operations... we'll
 		 * delay in order to mitigate the impact of this
 		 * task on the normal operation of the system
 		 */
-		IOSleep(ut->uu_lowpri_delay);
-		ut->uu_lowpri_delay = 0;
+		throttle_lowpri_io(TRUE);
 	}
 	return (error);
 }