apple/xnu.git blobdiff: osfmk/vm/memory_object.c (xnu-3789.70.16)

diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c
index 286fcf6919eee862d04bb7533374a45ba6491373..d4bf4dcd8ce78e7934c1ab6b1c6b3d16a44bf763 100644 (file)
--- a/osfmk/vm/memory_object.c
+++ b/osfmk/vm/memory_object.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -56,8 +62,6 @@
  *     External memory management interface control functions.
  */
 
-#include <advisory_pageout.h>
-
 /*
  *     Interface dependencies:
  */
 #include <vm/pmap.h>           /* For pmap_clear_modify */
 #include <vm/vm_kern.h>                /* For kernel_map, vm_move */
 #include <vm/vm_map.h>         /* For vm_map_pageable */
+#include <vm/vm_purgeable_internal.h>  /* Needed by some vm_page.h macros */
+#include <vm/vm_shared_region.h>
 
-#if    MACH_PAGEMAP
 #include <vm/vm_external.h>
-#endif /* MACH_PAGEMAP */
 
 #include <vm/vm_protos.h>
 
-
 memory_object_default_t        memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
-vm_size_t              memory_manager_default_cluster = 0;
-decl_mutex_data(,      memory_manager_default_lock)
+decl_lck_mtx_data(,    memory_manager_default_lock)
 
 
 /*
@@ -127,16 +129,16 @@ decl_mutex_data(, memory_manager_default_lock)
 
 #define        memory_object_should_return_page(m, should_return) \
     (should_return != MEMORY_OBJECT_RETURN_NONE && \
-     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
+     (((m)->dirty || ((m)->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \
       ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
       (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
 
 typedef        int     memory_object_lock_result_t;
 
-#define MEMORY_OBJECT_LOCK_RESULT_DONE          0
-#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK    1
-#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN    2
-#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN   3
+#define MEMORY_OBJECT_LOCK_RESULT_DONE                 0
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK           1
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN          2
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE            3
 
 memory_object_lock_result_t memory_object_lock_page(
                                vm_page_t               m,
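
The memory_object_should_return_page() macro above is easier to follow when
restated as a function. This is an illustrative sketch only -- the shipped
code stays a macro so that the assignment to (m)->dirty caches the result of
the pmap query on the page itself:

	/* Hypothetical restatement; not part of the diff. */
	static boolean_t
	should_return_page_sketch(
		vm_page_t               m,
		memory_object_return_t  should_return)
	{
		if (should_return == MEMORY_OBJECT_RETURN_NONE)
			return FALSE;
		/* a page modified through any pmap mapping counts as dirty */
		if (m->dirty || (m->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m))))
			return TRUE;
		/* precious pages are returned only when the caller asks for ALL */
		if (m->precious && should_return == MEMORY_OBJECT_RETURN_ALL)
			return TRUE;
		return (should_return == MEMORY_OBJECT_RETURN_ANYTHING);
	}
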
@@ -166,192 +168,107 @@ memory_object_lock_page(
 {
         XPR(XPR_MEMORY_OBJECT,
             "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
-            (integer_t)m, should_return, should_flush, prot, 0);
+            m, should_return, should_flush, prot, 0);
 
-       /*
-        *      If we cannot change access to the page,
-        *      either because a mapping is in progress
-        *      (busy page) or because a mapping has been
-        *      wired, then give up.
-        */
 
        if (m->busy || m->cleaning)
-               return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
+               return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
+
+       if (m->laundry)
+               vm_pageout_steal_laundry(m, FALSE);
 
        /*
         *      Don't worry about pages for which the kernel
         *      does not have any data.
         */
-
        if (m->absent || m->error || m->restart) {
-               if(m->error && should_flush) {
-                       /* dump the page, pager wants us to */
-                       /* clean it up and there is no      */
-                       /* relevant data to return */
-                       if(m->wire_count == 0) {
-                               VM_PAGE_FREE(m);
-                               return(MEMORY_OBJECT_LOCK_RESULT_DONE);
-                       }
-               } else {
-                       return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+               if (m->error && should_flush && !VM_PAGE_WIRED(m)) {
+                       /*
+                        * dump the page, pager wants us to
+                        * clean it up and there is no
+                        * relevant data to return
+                        */
+                       return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
                }
+               return (MEMORY_OBJECT_LOCK_RESULT_DONE);
        }
-
        assert(!m->fictitious);
 
-       if (m->wire_count != 0) {
+       if (VM_PAGE_WIRED(m)) {
                /*
-                *      If no change would take place
-                *      anyway, return successfully.
-                *
-                *      No change means:
-                *              Not flushing AND
-                *              No change to page lock [2 checks]  AND
-                *              Should not return page
-                *
-                * XXX  This doesn't handle sending a copy of a wired
-                * XXX  page to the pager, but that will require some
-                * XXX  significant surgery.
+                * The page is wired... just clean or return the page if needed.
+                * Wired pages don't get flushed or disconnected from the pmap.
                 */
-               if (!should_flush &&
-                   (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
-                   ! memory_object_should_return_page(m, should_return)) {
-
-                       /*
-                        *      Restart page unlock requests,
-                        *      even though no change took place.
-                        *      [Memory managers may be expecting
-                        *      to see new requests.]
-                        */
-                       m->unlock_request = VM_PROT_NONE;
-                       PAGE_WAKEUP(m);
-
-                       return(MEMORY_OBJECT_LOCK_RESULT_DONE);
-               }
+               if (memory_object_should_return_page(m, should_return))
+                       return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
 
-               return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
-       }
-
-       /*
-        *      If the page is to be flushed, allow
-        *      that to be done as part of the protection.
-        */
-
-       if (should_flush)
-               prot = VM_PROT_ALL;
+               return (MEMORY_OBJECT_LOCK_RESULT_DONE);
+       }               
 
-       /*
-        *      Set the page lock.
-        *
-        *      If we are decreasing permission, do it now;
-        *      let the fault handler take care of increases
-        *      (pmap_page_protect may not increase protection).
-        */
-
-       if (prot != VM_PROT_NO_CHANGE) {
-               if ((m->page_lock ^ prot) & prot) {
-                       pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
-               }
-#if 0
-               /* code associated with the vestigial 
-                * memory_object_data_unlock
+       if (should_flush) {
+               /*
+                * must do the pmap_disconnect before determining the 
+                * need to return the page... otherwise it's possible
+                * for the page to go from the clean to the dirty state
+                * after we've made our decision
                 */
-               m->page_lock = prot;
-               m->lock_supplied = TRUE;
-               if (prot != VM_PROT_NONE)
-                       m->unusual = TRUE;
-               else
-                       m->unusual = FALSE;
-
+               if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
+                       SET_PAGE_DIRTY(m, FALSE);
+               }
+       } else {
                /*
-                *      Restart any past unlock requests, even if no
-                *      change resulted.  If the manager explicitly
-                *      requested no protection change, then it is assumed
-                *      to be remembering past requests.
+                * If we are decreasing permission, do it now;
+                * let the fault handler take care of increases
+                * (pmap_page_protect may not increase protection).
                 */
-
-               m->unlock_request = VM_PROT_NONE;
-#endif /* 0 */
-               PAGE_WAKEUP(m);
+               if (prot != VM_PROT_NO_CHANGE)
+                       pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
        }
-
        /*
-        *      Handle page returning.
+        *      Handle returning dirty or precious pages
         */
-
        if (memory_object_should_return_page(m, should_return)) {
-
                /*
-                *      If we weren't planning
-                *      to flush the page anyway,
-                *      we may need to remove the
-                *      page from the pageout
-                *      system and from physical
-                *      maps now.
+                * we used to do a pmap_disconnect here in support
+                * of memory_object_lock_request, but that routine
+                * no longer requires this...  in any event, in
+                * our world, it would turn into a big noop since
+                * we don't lock the page in any way and as soon
+                * as we drop the object lock, the page can be
+                * faulted back into an address space
+                *
+                *      if (!should_flush)
+                *              pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
                 */
-               
-               vm_page_lock_queues();
-               VM_PAGE_QUEUES_REMOVE(m);
-               vm_page_unlock_queues();
-
-               if (!should_flush)
-                       pmap_disconnect(m->phys_page);
-
-               if (m->dirty)
-                       return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
-               else
-                       return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
+               return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
        }
 
        /*
-        *      Handle flushing
+        *      Handle flushing clean pages
         */
+       if (should_flush)
+               return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
 
-       if (should_flush) {
-               VM_PAGE_FREE(m);
-       } else {
-               /*
-                *      XXX Make clean but not flush a paging hint,
-                *      and deactivate the pages.  This is a hack
-                *      because it overloads flush/clean with
-                *      implementation-dependent meaning.  This only
-                *      happens to pages that are already clean.
-                */
-
-               if (vm_page_deactivate_hint &&
-                   (should_return != MEMORY_OBJECT_RETURN_NONE)) {
-                       vm_page_lock_queues();
-                       vm_page_deactivate(m);
-                       vm_page_unlock_queues();
-               }
-       }
+       /*
+        * we used to deactivate clean pages at this point,
+        * but we do not believe that an msync should change
+        * the 'age' of a page in the cache... here is the
+        * original comment and code concerning this...
+        *
+        *      XXX Make clean but not flush a paging hint,
+        *      and deactivate the pages.  This is a hack
+        *      because it overloads flush/clean with
+        *      implementation-dependent meaning.  This only
+        *      happens to pages that are already clean.
+        *
+        *   if (vm_page_deactivate_hint && (should_return != MEMORY_OBJECT_RETURN_NONE))
+        *      return (MEMORY_OBJECT_LOCK_RESULT_MUST_DEACTIVATE);
+        */
 
-       return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+       return (MEMORY_OBJECT_LOCK_RESULT_DONE);
 }
 
-#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po, ro, ioerr, iosync)    \
-MACRO_BEGIN                                                            \
-                                                                       \
-        register int            upl_flags;                              \
-                                                                       \
-       vm_object_unlock(object);                                       \
-                                                                       \
-                if (iosync)                                             \
-                        upl_flags = UPL_MSYNC | UPL_IOSYNC;             \
-                else                                                    \
-                        upl_flags = UPL_MSYNC;                          \
-                                                                       \
-               (void) memory_object_data_return(object->pager,         \
-               po,                                                     \
-               data_cnt,                                               \
-                ro,                                                     \
-                ioerr,                                                  \
-               (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN),       \
-               !should_flush,                                          \
-               upl_flags);                                             \
-                                                                       \
-       vm_object_lock(object);                                         \
-MACRO_END
+
 
 /*
  *     Routine:        memory_object_lock_request [user interface]
@@ -390,16 +307,8 @@ memory_object_lock_request(
        vm_prot_t                       prot)
 {
        vm_object_t     object;
-       __unused boolean_t should_flush;
-
-       should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
 
-        XPR(XPR_MEMORY_OBJECT,
-           "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
-           (integer_t)control, offset, size, 
-           (((should_return&1)<<1)|should_flush), prot);
-
-       /*
+        /*
         *      Check for bogus arguments.
         */
        object = memory_object_control_to_vm_object(control);
@@ -417,10 +326,20 @@ memory_object_lock_request(
         */
        vm_object_lock(object);
        vm_object_paging_begin(object);
+
+       if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
+               if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
+                       flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
+                       flags |= MEMORY_OBJECT_DATA_FLUSH;
+               }
+       }
        offset -= object->paging_offset;
 
-       (void)vm_object_update(object,
-               offset, size, resid_offset, io_errno, should_return, flags, prot);
+       if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL)
+               vm_object_reap_pages(object, REAP_DATA_FLUSH);
+       else
+               (void)vm_object_update(object, offset, size, resid_offset,
+                                      io_errno, should_return, flags, prot);
 
        vm_object_paging_end(object);
        vm_object_unlock(object);
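
The MEMORY_OBJECT_DATA_FLUSH_ALL handling above reads more naturally with the
condition un-inverted. A condensed sketch (assuming the same flag semantics;
not a drop-in replacement):

	/* The reap-everything fast path is only taken when nothing must be
	 * returned to the pager, the request starts at offset 0, and there is
	 * no copy object that might still need the old contents.  Otherwise
	 * the request is demoted to an ordinary page-by-page DATA_FLUSH. */
	boolean_t can_reap_all = (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) &&
	                         should_return == MEMORY_OBJECT_RETURN_NONE &&
	                         offset == 0 &&
	                         object->copy == VM_OBJECT_NULL;
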
@@ -520,7 +439,7 @@ vm_object_sync(
 
         XPR(XPR_VM_OBJECT,
             "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
-            (integer_t)object, offset, size, should_flush, should_return);
+            object, offset, size, should_flush, should_return);
 
        /*
         * Lock the object, and acquire a paging reference to
@@ -530,9 +449,17 @@ vm_object_sync(
        vm_object_lock(object);
        vm_object_paging_begin(object);
 
-       if (should_flush)
+       if (should_flush) {
                flags = MEMORY_OBJECT_DATA_FLUSH;
-       else
+               /*
+                * This flush is from an msync(), not a truncate(), so the
+                * contents of the file are not affected.
+                * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
+                * that the data is not changed and that there's no need to
+                * push the old contents to a copy object.
+                */
+               flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
+       } else
                flags = 0;
 
        if (should_iosync)
@@ -553,6 +480,41 @@ vm_object_sync(
 
 
 
+#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync)    \
+MACRO_BEGIN                                                            \
+                                                                       \
+        int                    upl_flags;                              \
+       memory_object_t         pager;                                  \
+                                                                       \
+       if (object->object_slid) {                                      \
+               panic("Objects with slid pages not allowed\n");         \
+       }                                                               \
+                                                                       \
+       if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) {          \
+               vm_object_paging_begin(object);                         \
+               vm_object_unlock(object);                               \
+                                                                       \
+                if (iosync)                                            \
+                        upl_flags = UPL_MSYNC | UPL_IOSYNC;            \
+                else                                                   \
+                        upl_flags = UPL_MSYNC;                         \
+                                                                       \
+               (void) memory_object_data_return(pager,                 \
+                       po,                                             \
+                       (memory_object_cluster_size_t)data_cnt,         \
+                       ro,                                             \
+                       ioerr,                                          \
+                       FALSE,                                          \
+                       FALSE,                                          \
+                       upl_flags);                                     \
+                                                                       \
+               vm_object_lock(object);                                 \
+               vm_object_paging_end(object);                           \
+       }                                                               \
+MACRO_END
+
+extern struct vnode *
+vnode_pager_lookup_vnode(memory_object_t);
 
 static int
 vm_object_update_extent(
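
The rewritten LIST_REQ_PAGEOUT_PAGES macro above follows a standard XNU
pattern: the object lock cannot be held across a pager upcall, so the macro
takes a paging reference, drops the lock for the call, then re-locks. The
shape of the pattern, abstracted (illustrative only):

	vm_object_paging_begin(object);   /* pin the object across the unlock */
	vm_object_unlock(object);         /* upcall may block or take locks   */

	(void) memory_object_data_return(pager, po,
	        (memory_object_cluster_size_t) data_cnt,
	        ro, ioerr, FALSE, FALSE, upl_flags);

	vm_object_lock(object);           /* re-acquire before touching pages */
	vm_object_paging_end(object);     /* drop the paging reference        */

Because the lock is dropped, any page examined before the call may have
changed state by the time the caller resumes; vm_object_update_extent below
re-looks pages up for exactly this reason.
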
@@ -568,127 +530,140 @@ vm_object_update_extent(
 {
         vm_page_t      m;
         int            retval = 0;
-       vm_size_t       data_cnt = 0;
        vm_object_offset_t      paging_offset = 0;
-       vm_object_offset_t      last_offset = offset;
+       vm_object_offset_t      next_offset = offset;
         memory_object_lock_result_t    page_lock_result;
-       memory_object_lock_result_t     pageout_action;
-       
-       pageout_action = MEMORY_OBJECT_LOCK_RESULT_DONE;
+       memory_object_cluster_size_t    data_cnt = 0;
+       struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+       struct vm_page_delayed_work     *dwp;
+       int             dw_count;
+       int             dw_limit;
+       int             dirty_count;
+
+        dwp = &dw_array[0];
+        dw_count = 0;
+       dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+       dirty_count = 0;
 
        for (;
             offset < offset_end && object->resident_page_count;
             offset += PAGE_SIZE_64) {
 
                /*
-                * Limit the number of pages to be cleaned at once.
+                * Limit the number of pages to be cleaned at once to a contiguous
+                * run, or at most MAX_UPL_TRANSFER_BYTES
                 */
-               if (data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) {
-                       LIST_REQ_PAGEOUT_PAGES(object, data_cnt, 
-                                              pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
-                       data_cnt = 0;
-               }
+               if (data_cnt) {
+                       if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
 
+                               if (dw_count) {
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+                                       dwp = &dw_array[0];
+                                       dw_count = 0;
+                               }
+                               LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+                                                      paging_offset, offset_resid, io_errno, should_iosync);
+                               data_cnt = 0;
+                       }
+               }
                while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
-                       page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
-
-                       XPR(XPR_MEMORY_OBJECT,
-                           "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
-                           (integer_t)object, offset, page_lock_result, 0, 0);
-
-                       switch (page_lock_result)
-                       {
-                         case MEMORY_OBJECT_LOCK_RESULT_DONE:
-                           /*
-                            *  End of a cluster of dirty pages.
-                            */
-                           if (data_cnt) {
-                                   LIST_REQ_PAGEOUT_PAGES(object, 
-                                                          data_cnt, pageout_action, 
-                                                          paging_offset, offset_resid, io_errno, should_iosync);
-                                   data_cnt = 0;
-                                   continue;
-                           }
-                           break;
-
-                         case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
-                           /*
-                            *  Since it is necessary to block,
-                            *  clean any dirty pages now.
-                            */
-                           if (data_cnt) {
-                                   LIST_REQ_PAGEOUT_PAGES(object,
-                                                          data_cnt, pageout_action, 
-                                                          paging_offset, offset_resid, io_errno, should_iosync);
-                                   data_cnt = 0;
-                                   continue;
-                           }
-                           PAGE_SLEEP(object, m, THREAD_UNINT);
-                           continue;
-
-                         case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
-                         case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
-                           /*
-                            * The clean and return cases are similar.
-                            *
-                            * if this would form a discontiguous block,
-                            * clean the old pages and start anew.
-                            *
-                            * Mark the page busy since we will unlock the
-                            * object if we issue the LIST_REQ_PAGEOUT
-                            */
-                           m->busy = TRUE;
-                           if (data_cnt && 
-                               ((last_offset != offset) || (pageout_action != page_lock_result))) {
-                                   LIST_REQ_PAGEOUT_PAGES(object, 
-                                                          data_cnt, pageout_action, 
-                                                          paging_offset, offset_resid, io_errno, should_iosync);
-                                   data_cnt = 0;
-                           }
-                           m->busy = FALSE;
-
-                           if (m->cleaning) {
-                                   PAGE_SLEEP(object, m, THREAD_UNINT);
-                                   continue;
-                           }
-                           if (data_cnt == 0) {
-                                   pageout_action = page_lock_result;
-                                   paging_offset = offset;
-                           }
-                           data_cnt += PAGE_SIZE;
-                           last_offset = offset + PAGE_SIZE_64;
-
-                           vm_page_lock_queues();
-                           /*
-                            * Clean
-                            */
-                           m->list_req_pending = TRUE;
-                           m->cleaning = TRUE;
-
-                           if (should_flush) {
-                                   /*
-                                    * and add additional state
-                                    * for the flush
-                                    */
-                                   m->busy = TRUE;
-                                   m->pageout = TRUE;
-                                   vm_page_wire(m);
-                           }
-                           vm_page_unlock_queues();
-
-                           retval = 1;
-                           break;
+
+                       dwp->dw_mask = 0;
+                       
+                       page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
+
+                       if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
+                               /*
+                                *      End of a run of dirty/precious pages.
+                                */
+                               if (dw_count) {
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+                                       dwp = &dw_array[0];
+                                       dw_count = 0;
+                               }
+                               LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+                                                      paging_offset, offset_resid, io_errno, should_iosync);
+                               /*
+                                * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
+                                * allow the state of page 'm' to change... we need to re-lookup
+                                * the current offset
+                                */
+                               data_cnt = 0;
+                               continue;
+                       }
+
+                       switch (page_lock_result) {
+
+                       case MEMORY_OBJECT_LOCK_RESULT_DONE:
+                               break;
+
+                       case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
+                               if (m->dirty == TRUE)
+                                       dirty_count++;
+                               dwp->dw_mask |= DW_vm_page_free;
+                               break;
+
+                       case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
+                               PAGE_SLEEP(object, m, THREAD_UNINT);
+                               continue;
+
+                       case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
+                               if (data_cnt == 0)
+                                       paging_offset = offset;
+
+                               data_cnt += PAGE_SIZE;
+                               next_offset = offset + PAGE_SIZE_64;
+
+                               /*
+                                * wired pages shouldn't be flushed and
+                                * since they aren't on any queue,
+                                * no need to remove them
+                                */
+                               if (!VM_PAGE_WIRED(m)) {
+
+                                       if (should_flush) {
+                                               /*
+                                                * add additional state for the flush
+                                                */
+                                               m->free_when_done = TRUE;
+                                       }
+                                       /*
+                                        * we used to remove the page from the queues at this
+                                        * point, but we do not believe that an msync
+                                        * should cause the 'age' of a page to be changed
+                                        *
+                                        *    else
+                                        *      dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
+                                        */
+                               }
+                               retval = 1;
+                               break;
+                       }
+                       if (dwp->dw_mask) {
+                               VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
+
+                               if (dw_count >= dw_limit) {
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+                                       dwp = &dw_array[0];
+                                       dw_count = 0;
+                               }
                        }
                        break;
                }
        }
+       
+       if (object->pager)
+               task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
        /*
         *      We have completed the scan for applicable pages.
         *      Clean any pages that have been saved.
         */
+       if (dw_count)
+               vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+
        if (data_cnt) {
-               LIST_REQ_PAGEOUT_PAGES(object,
-                                      data_cnt, pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
+               LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+                                      paging_offset, offset_resid, io_errno, should_iosync);
        }
        return (retval);
 }
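
Two batching mechanisms drive vm_object_update_extent() above: dirty and
precious pages are gathered into a single contiguous run that is pushed to
the pager when continuity breaks or the run reaches MAX_UPL_TRANSFER_BYTES,
and per-page queue work is deferred into dw_array so vm_page_do_delayed_work()
can take the page-queues lock once per batch instead of once per page. The
run accumulation, condensed (illustrative, not a drop-in function):

	/* growing the run at page 'offset' */
	if (data_cnt == 0)
		paging_offset = offset;              /* run starts here       */
	data_cnt   += PAGE_SIZE;                     /* run grows by one page */
	next_offset = offset + PAGE_SIZE_64;         /* next contiguous slot  */

	/* before handling the next page: flush the run if it cannot be
	 * extended contiguously or it has reached the UPL size cap */
	if (data_cnt &&
	    (data_cnt >= MAX_UPL_TRANSFER_BYTES || next_offset != offset)) {
		LIST_REQ_PAGEOUT_PAGES(object, data_cnt, paging_offset,
		                       offset_resid, io_errno, should_iosync);
		data_cnt = 0;
	}
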
@@ -704,20 +679,21 @@ vm_object_update_extent(
  */
 kern_return_t
 vm_object_update(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset,
-       register vm_object_size_t       size,
-       register vm_object_offset_t     *resid_offset,
-       int                             *io_errno,
-       memory_object_return_t          should_return,
-       int                             flags,
-       vm_prot_t                       protection)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size,
+       vm_object_offset_t      *resid_offset,
+       int                     *io_errno,
+       memory_object_return_t  should_return,
+       int                     flags,
+       vm_prot_t               protection)
 {
-       vm_object_t             copy_object;
+        vm_object_t            copy_object = VM_OBJECT_NULL;
        boolean_t               data_returned = FALSE;
        boolean_t               update_cow;
        boolean_t               should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
        boolean_t               should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
+       vm_fault_return_t       result;
        int                     num_of_extents;
        int                     n;
 #define MAX_EXTENTS    8
@@ -751,9 +727,39 @@ vm_object_update(
                                        !(flags & MEMORY_OBJECT_DATA_PURGE)))
                                || (flags & MEMORY_OBJECT_COPY_SYNC);
                        
+       if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
+               int collisions = 0;
+
+               while ((copy_object = object->copy) != VM_OBJECT_NULL) {
+                       /*
+                        * need to do a try here since we're swimming upstream
+                        * against the normal lock ordering... however, we need
+                        * to hold the object stable until we gain control of the
+                        * copy object so we have to be careful how we approach this
+                        */
+                       if (vm_object_lock_try(copy_object)) {
+                              /*
+                               * we 'won' the lock on the copy object...
+                               * no need to hold the object lock any longer...
+                               * take a real reference on the copy object because
+                               * we're going to call vm_fault_page on it which may
+                               * under certain conditions drop the lock and the paging
+                               * reference we're about to take... the reference
+                               * will keep the copy object from going away if that happens
+                               */
+                              vm_object_unlock(object);
+                              vm_object_reference_locked(copy_object);
+                              break;
+                       }
+                       vm_object_unlock(object);
+
+                       collisions++;
+                       mutex_pause(collisions);
 
-       if((((copy_object = object->copy) != NULL) && update_cow) ||
-                                       (flags & MEMORY_OBJECT_DATA_SYNC)) {
+                       vm_object_lock(object);
+               }
+       }
+       if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
                vm_map_size_t           i;
                vm_map_size_t           copy_size;
                vm_map_offset_t         copy_offset;
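
The lock-try loop above (the "swimming upstream" comment) is the standard
answer to taking a lock against the established ordering: never block on the
out-of-order lock while holding the first one. The general shape, abstracted
from the code (illustrative only):

	int collisions = 0;

	vm_object_lock(object);                      /* lock A, held         */
	while ((copy_object = object->copy) != VM_OBJECT_NULL) {
		if (vm_object_lock_try(copy_object)) {  /* lock B, non-blocking */
			vm_object_unlock(object);
			/* real reference so B survives vm_fault_page() dropping
			 * its lock and paging reference later on */
			vm_object_reference_locked(copy_object);
			break;
		}
		vm_object_unlock(object);  /* let B's holder make progress   */
		collisions++;
		mutex_pause(collisions);   /* back off, scaled by contention */
		vm_object_lock(object);    /* retake A; object->copy may have changed */
	}
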
@@ -761,81 +767,97 @@ vm_object_update(
                vm_page_t               page;
                vm_page_t               top_page;
                kern_return_t           error = 0;
+               struct vm_object_fault_info fault_info;
+
+               if (copy_object != VM_OBJECT_NULL) {
+                       /*
+                        * translate offset with respect to shadow's offset
+                        */
+                       copy_offset = (offset >= copy_object->vo_shadow_offset) ?
+                         (vm_map_offset_t)(offset - copy_object->vo_shadow_offset) :
+                         (vm_map_offset_t) 0;
+
+                       if (copy_offset > copy_object->vo_size)
+                               copy_offset = copy_object->vo_size;
+
+                       /*
+                        * clip size with respect to shadow offset
+                        */
+                       if (offset >= copy_object->vo_shadow_offset) {
+                               copy_size = size;
+                       } else if (size >= copy_object->vo_shadow_offset - offset) {
+                               copy_size = size - (copy_object->vo_shadow_offset - offset);
+                       } else {
+                               copy_size = 0;
+                       }
+                       
+                       if (copy_offset + copy_size > copy_object->vo_size) {
+                               if (copy_object->vo_size >= copy_offset) {
+                                       copy_size = copy_object->vo_size - copy_offset;
+                               } else {
+                                       copy_size = 0;
+                               }
+                       }
+                       copy_size+=copy_offset;
 
-               if(copy_object != NULL) {
-                  /* translate offset with respect to shadow's offset */
-                  copy_offset = (offset >= copy_object->shadow_offset)?
-                       (vm_map_offset_t)(offset - copy_object->shadow_offset) :
-                       (vm_map_offset_t) 0;
-                  if(copy_offset > copy_object->size)
-                       copy_offset = copy_object->size;
-
-                  /* clip size with respect to shadow offset */
-                  if (offset >= copy_object->shadow_offset) {
-                          copy_size = size;
-                  } else if (size >= copy_object->shadow_offset - offset) {
-                          copy_size = size -
-                                  (copy_object->shadow_offset - offset);
-                  } else {
-                          copy_size = 0;
-                  }
-
-                  if (copy_offset + copy_size > copy_object->size) {
-                          if (copy_object->size >= copy_offset) {
-                                  copy_size = copy_object->size - copy_offset;
-                          } else {
-                                  copy_size = 0;
-                          }
-                  }
-
-                  copy_size+=copy_offset;
-
-                  vm_object_unlock(object);
-                  vm_object_lock(copy_object);
                } else {
                        copy_object = object;
 
                        copy_size   = offset + size;
                        copy_offset = offset;
                }
+               fault_info.interruptible = THREAD_UNINT;
+               fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
+               fault_info.user_tag  = 0;
+               fault_info.pmap_options = 0;
+               fault_info.lo_offset = copy_offset;
+               fault_info.hi_offset = copy_size;
+               fault_info.no_cache   = FALSE;
+               fault_info.stealth = TRUE;
+               fault_info.io_sync = FALSE;
+               fault_info.cs_bypass = FALSE;
+               fault_info.mark_zf_absent = FALSE;
+               fault_info.batch_pmap_op = FALSE;
 
                vm_object_paging_begin(copy_object);
-               for (i=copy_offset; i<copy_size; i+=PAGE_SIZE) {
+
+               for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
        RETRY_COW_OF_LOCK_REQUEST:
-                       prot =  VM_PROT_WRITE|VM_PROT_READ;
-                       switch (vm_fault_page(copy_object, i, 
-                               VM_PROT_WRITE|VM_PROT_READ,
-                               FALSE,
-                               THREAD_UNINT,
-                               copy_offset,
-                               copy_offset+copy_size,
-                               VM_BEHAVIOR_SEQUENTIAL,
-                               &prot,
-                               &page,
-                               &top_page,
-                               (int *)0,
-                               &error,
-                               FALSE,
-                               FALSE, NULL, 0)) {
+                       fault_info.cluster_size = (vm_size_t) (copy_size - i);
+                       assert(fault_info.cluster_size == copy_size - i);
 
+                       prot =  VM_PROT_WRITE|VM_PROT_READ;
+                       page = VM_PAGE_NULL;
+                       result = vm_fault_page(copy_object, i, 
+                                              VM_PROT_WRITE|VM_PROT_READ,
+                                              FALSE,
+                                              FALSE, /* page not looked up */
+                                              &prot,
+                                              &page,
+                                              &top_page,
+                                              (int *)0,
+                                              &error,
+                                              FALSE,
+                                              FALSE, &fault_info);
+
+                       switch (result) {
                        case VM_FAULT_SUCCESS:
-                               if(top_page) {
+                               if (top_page) {
                                        vm_fault_cleanup(
-                                               page->object, top_page);
-                                       PAGE_WAKEUP_DONE(page);
-                                       vm_page_lock_queues();
-                                       if (!page->active && !page->inactive)
-                                               vm_page_activate(page);
-                                       vm_page_unlock_queues();
+                                               VM_PAGE_OBJECT(page), top_page);
                                        vm_object_lock(copy_object);
                                        vm_object_paging_begin(copy_object);
-                               } else {
-                                       PAGE_WAKEUP_DONE(page);
-                                       vm_page_lock_queues();
-                                       if (!page->active && !page->inactive)
-                                               vm_page_activate(page);
+                               }
+                               if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
+
+                                       vm_page_lockspin_queues();
+                                       
+                                       if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
+                                               vm_page_deactivate(page);
+                                       }
                                        vm_page_unlock_queues();
                                }
+                               PAGE_WAKEUP_DONE(page);
                                break;
                        case VM_FAULT_RETRY:
                                prot =  VM_PROT_WRITE|VM_PROT_READ;
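
The shadow-offset translation near the top of this hunk deserves a worked
example, since the closing "copy_size += copy_offset" silently turns a length
into an exclusive end offset. With hypothetical numbers:

	/*
	 * vo_shadow_offset = 0x8000, vo_size = 0x10000
	 * request: offset = 0x6000, size = 0x4000  (object range 0x6000..0xA000)
	 *
	 * offset < vo_shadow_offset          -> copy_offset = 0
	 * size >= vo_shadow_offset - offset  -> copy_size = 0x4000 - 0x2000
	 *                                                 = 0x2000
	 * copy_offset + copy_size <= vo_size -> no clamping
	 * copy_size += copy_offset           -> copy_size = 0x2000 (END, not length)
	 *
	 * The fault loop then walks copy-object offsets [0, 0x2000), i.e. the
	 * part of the request visible in the copy object (object offsets
	 * 0x8000..0xA000).
	 */
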
@@ -853,34 +875,46 @@ vm_object_update(
                                vm_object_lock(copy_object);
                                vm_object_paging_begin(copy_object);
                                goto RETRY_COW_OF_LOCK_REQUEST;
-                       case VM_FAULT_FICTITIOUS_SHORTAGE:
-                               vm_page_more_fictitious();
-                               prot =  VM_PROT_WRITE|VM_PROT_READ;
-                               vm_object_lock(copy_object);
-                               vm_object_paging_begin(copy_object);
-                               goto RETRY_COW_OF_LOCK_REQUEST;
+                       case VM_FAULT_SUCCESS_NO_VM_PAGE:
+                               /* success but no VM page: fail */
+                               vm_object_paging_end(copy_object);
+                               vm_object_unlock(copy_object);
+                               /*FALLTHROUGH*/
                        case VM_FAULT_MEMORY_ERROR:
+                               if (object != copy_object)
+                                       vm_object_deallocate(copy_object);
                                vm_object_lock(object);
                                goto BYPASS_COW_COPYIN;
+                       default:
+                               panic("vm_object_update: unexpected error 0x%x"
+                                     " from vm_fault_page()\n", result);
                        }
 
                }
                vm_object_paging_end(copy_object);
-               if(copy_object != object) {
+       }
+       if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
+               if (copy_object != VM_OBJECT_NULL && copy_object != object) {
                        vm_object_unlock(copy_object);
+                       vm_object_deallocate(copy_object);
                        vm_object_lock(object);
                }
+               return KERN_SUCCESS;
        }
-       if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
-                       return KERN_SUCCESS;
-       }
-       if(((copy_object = object->copy) != NULL) && 
-                                       (flags & MEMORY_OBJECT_DATA_PURGE)) {
-               copy_object->shadow_severed = TRUE;
-               copy_object->shadowed = FALSE;
-               copy_object->shadow = NULL;
-               /* delete the ref the COW was holding on the target object */
-               vm_object_deallocate(object);
+       if (copy_object != VM_OBJECT_NULL && copy_object != object) {
+               if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
+                       vm_object_lock_assert_exclusive(copy_object);
+                       copy_object->shadow_severed = TRUE;
+                       copy_object->shadowed = FALSE;
+                       copy_object->shadow = NULL;
+                       /*
+                        * delete the ref the COW was holding on the target object
+                        */
+                       vm_object_deallocate(object);
+               }
+               vm_object_unlock(copy_object);
+               vm_object_deallocate(copy_object);
+               vm_object_lock(object);
        }
 BYPASS_COW_COPYIN:
 
@@ -921,10 +955,10 @@ BYPASS_COW_COPYIN:
                num_of_extents = 0;
                e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));
 
-               m = (vm_page_t) queue_first(&object->memq);
+               m = (vm_page_t) vm_page_queue_first(&object->memq);
 
-               while (!queue_end(&object->memq, (queue_entry_t) m)) {
-                       next = (vm_page_t) queue_next(&m->listq);
+               while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
+                       next = (vm_page_t) vm_page_queue_next(&m->listq);
 
                        if ((m->offset >= start) && (m->offset < end)) {
                                /*
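
The queue walk above uses the save-next-before-acting pattern: 'next' is
fetched from m->listq before the page is examined, because handling a page
can remove it from object->memq. A minimal sketch of the idiom (the advance
step falls outside the visible hunk):

	m = (vm_page_t) vm_page_queue_first(&object->memq);
	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
		next = (vm_page_t) vm_page_queue_next(&m->listq);
		/* ... may unlink or free 'm' ... */
		m = next;
	}
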
@@ -1026,7 +1060,7 @@ kern_return_t
 memory_object_synchronize_completed(
        memory_object_control_t control,
        memory_object_offset_t  offset,
-       vm_offset_t                     length)
+       memory_object_size_t    length)
 {
        vm_object_t                     object;
        msync_req_t                     msr;
@@ -1035,7 +1069,7 @@ memory_object_synchronize_completed(
 
         XPR(XPR_MEMORY_OBJECT,
            "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
-           (integer_t)object, offset, length, 0, 0);
+           object, offset, length, 0, 0);
 
        /*
         *      Look for bogus arguments
@@ -1076,15 +1110,14 @@ vm_object_set_attributes_common(
        boolean_t       may_cache,
        memory_object_copy_strategy_t copy_strategy,
        boolean_t       temporary,
-       memory_object_cluster_size_t    cluster_size,
-        boolean_t      silent_overwrite,
+       __unused boolean_t      silent_overwrite,
        boolean_t       advisory_pageout)
 {
        boolean_t       object_became_ready;
 
         XPR(XPR_MEMORY_OBJECT,
            "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
-           (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
+           object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
 
        if (object == VM_OBJECT_NULL)
                return(KERN_INVALID_ARGUMENT);
@@ -1101,26 +1134,10 @@ vm_object_set_attributes_common(
                        return(KERN_INVALID_ARGUMENT);
        }
 
-#if    !ADVISORY_PAGEOUT
-       if (silent_overwrite || advisory_pageout)
-               return(KERN_INVALID_ARGUMENT);
-
-#endif /* !ADVISORY_PAGEOUT */
        if (may_cache)
                may_cache = TRUE;
        if (temporary)
                temporary = TRUE;
-       if (cluster_size != 0) {
-               int     pages_per_cluster;
-               pages_per_cluster = atop_32(cluster_size);
-               /*
-                * Cluster size must be integral multiple of page size,
-                * and be a power of 2 number of pages.
-                */
-               if ((cluster_size & (PAGE_SIZE-1)) ||
-                   ((pages_per_cluster-1) & pages_per_cluster))
-                       return KERN_INVALID_ARGUMENT;
-       }
 
        vm_object_lock(object);
 
@@ -1132,14 +1149,8 @@ vm_object_set_attributes_common(
        object->copy_strategy = copy_strategy;
        object->can_persist = may_cache;
        object->temporary = temporary;
-       object->silent_overwrite = silent_overwrite;
+//     object->silent_overwrite = silent_overwrite;
        object->advisory_pageout = advisory_pageout;
-       if (cluster_size == 0)
-               cluster_size = PAGE_SIZE;
-       object->cluster_size = cluster_size;
-
-       assert(cluster_size >= PAGE_SIZE &&
-              cluster_size % PAGE_SIZE == 0);
 
        /*
         *      Wake up anyone waiting for the ready attribute
@@ -1176,7 +1187,6 @@ memory_object_change_attributes(
        boolean_t                       temporary;
        boolean_t                       may_cache;
        boolean_t                       invalidate;
-       memory_object_cluster_size_t    cluster_size;
        memory_object_copy_strategy_t   copy_strategy;
        boolean_t                       silent_overwrite;
        boolean_t                       advisory_pageout;
@@ -1190,12 +1200,12 @@ memory_object_change_attributes(
        temporary = object->temporary;
        may_cache = object->can_persist;
        copy_strategy = object->copy_strategy;
-       silent_overwrite = object->silent_overwrite;
+//     silent_overwrite = object->silent_overwrite;
+       silent_overwrite = FALSE;
        advisory_pageout = object->advisory_pageout;
 #if notyet
        invalidate = object->invalidate;
 #endif
-       cluster_size = object->cluster_size;
        vm_object_unlock(object);       
 
        switch (flavor) {
@@ -1248,7 +1258,6 @@ memory_object_change_attributes(
                 perf = (memory_object_perf_info_t) attributes;
 
                may_cache = perf->may_cache;
-               cluster_size = round_page_32(perf->cluster_size);
 
                break;
            }
@@ -1266,7 +1275,6 @@ memory_object_change_attributes(
 
                 may_cache = attr->may_cache;
                 copy_strategy = attr->copy_strategy;
-               cluster_size = page_size;
 
                break;
            }
@@ -1284,7 +1292,6 @@ memory_object_change_attributes(
 
                copy_strategy = attr->copy_strategy;
                 may_cache = attr->may_cache_object;
-               cluster_size = attr->cluster_size;
                temporary = attr->temporary;
 
                break;
@@ -1313,7 +1320,6 @@ memory_object_change_attributes(
                                                     may_cache,
                                                     copy_strategy,
                                                     temporary,
-                                                    cluster_size,
                                                     silent_overwrite,
                                                     advisory_pageout));
 }
@@ -1375,7 +1381,8 @@ memory_object_get_attributes(
                behave->invalidate = FALSE;
 #endif
                behave->advisory_pageout = object->advisory_pageout;
-               behave->silent_overwrite = object->silent_overwrite;
+//             behave->silent_overwrite = object->silent_overwrite;
+               behave->silent_overwrite = FALSE;
                 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
                break;
            }
@@ -1390,7 +1397,7 @@ memory_object_get_attributes(
                }
 
                perf = (memory_object_perf_info_t) attributes;
-               perf->cluster_size = object->cluster_size;
+               perf->cluster_size = PAGE_SIZE;
                perf->may_cache = object->can_persist;
 
                *count = MEMORY_OBJECT_PERF_INFO_COUNT;
@@ -1425,7 +1432,7 @@ memory_object_get_attributes(
 
                 attr = (memory_object_attr_info_t) attributes;
                attr->copy_strategy = object->copy_strategy;
-               attr->cluster_size = object->cluster_size;
+               attr->cluster_size = PAGE_SIZE;
                attr->may_cache_object = object->can_persist;
                attr->temporary = object->temporary;
 
@@ -1452,11 +1459,11 @@ memory_object_iopl_request(
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
-       int                     *flags)
+       upl_control_flags_t     *flags)
 {
        vm_object_t             object;
        kern_return_t           ret;
-       int                     caller_flags;
+       upl_control_flags_t     caller_flags;
 
        caller_flags = *flags;
 
@@ -1476,7 +1483,9 @@ memory_object_iopl_request(
                if(*upl_size == 0) {
                        if(offset >= named_entry->size)
                                return(KERN_INVALID_RIGHT);
-                       *upl_size = named_entry->size - offset;
+                       *upl_size = (upl_size_t)(named_entry->size - offset);
+                       if (*upl_size != named_entry->size - offset)
+                               return KERN_INVALID_ARGUMENT;
                }
                if(caller_flags & UPL_COPYOUT_FROM) {
                        if((named_entry->protection & VM_PROT_READ) 
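
The *upl_size assignment above uses the assign-then-compare idiom to detect
truncation when narrowing the 64-bit remainder into upl_size_t. In isolation
(variable names are illustrative):

	uint64_t   remaining = named_entry_size - offset;  /* 64-bit arithmetic */
	upl_size_t narrowed  = (upl_size_t) remaining;     /* may truncate      */

	if (narrowed != remaining)            /* re-widened for the comparison */
		return KERN_INVALID_ARGUMENT; /* value did not fit             */
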
@@ -1497,8 +1506,9 @@ memory_object_iopl_request(
                /* offset from beginning of named entry offset in object */
                offset = offset + named_entry->offset;
 
-               if(named_entry->is_sub_map) 
-                       return (KERN_INVALID_ARGUMENT);
+               if (named_entry->is_sub_map ||
+                   named_entry->is_copy)
+                       return KERN_INVALID_ARGUMENT;
                
                named_entry_lock(named_entry);
 
@@ -1541,22 +1551,22 @@ memory_object_iopl_request(
                        vm_object_reference(object);
                        named_entry_unlock(named_entry);
                }
-       } else  {
+       } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
                memory_object_control_t control;
-               control = (memory_object_control_t)port->ip_kobject;
+               control = (memory_object_control_t) port;
                if (control == NULL)
                        return (KERN_INVALID_ARGUMENT);
                object = memory_object_control_to_vm_object(control);
                if (object == VM_OBJECT_NULL)
                        return (KERN_INVALID_ARGUMENT);
                vm_object_reference(object);
+       } else {
+               return KERN_INVALID_ARGUMENT;
        }
        if (object == VM_OBJECT_NULL)
                return (KERN_INVALID_ARGUMENT);
 
        if (!object->private) {
-               if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
-                       *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
                if (object->phys_contiguous) {
                        *flags = UPL_PHYS_CONTIG;
                } else {
@@ -1600,7 +1610,7 @@ memory_object_upl_request(
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL)
-               return (KERN_INVALID_ARGUMENT);
+               return (KERN_TERMINATED);
 
        return vm_object_upl_request(object,
                                     offset,
@@ -1608,7 +1618,7 @@ memory_object_upl_request(
                                     upl_ptr,
                                     user_page_list,
                                     page_list_count,
-                                    cntrl_flags);
+                                    (upl_control_flags_t)(unsigned int) cntrl_flags);
 }
 
 /*  
@@ -1646,9 +1656,30 @@ memory_object_super_upl_request(
                                           upl,
                                           user_page_list,
                                           page_list_count,
-                                          cntrl_flags);
+                                          (upl_control_flags_t)(unsigned int) cntrl_flags);
+}
+
+kern_return_t
+memory_object_cluster_size(memory_object_control_t control, memory_object_offset_t *start,
+                          vm_size_t *length, uint32_t *io_streaming, memory_object_fault_info_t fault_info)
+{
+       vm_object_t             object;
+
+       object = memory_object_control_to_vm_object(control);
+
+       if (object == VM_OBJECT_NULL || object->paging_offset > *start)
+               return (KERN_INVALID_ARGUMENT);
+
+       *start -= object->paging_offset;
+
+       vm_object_cluster_size(object, (vm_object_offset_t *)start, length, (vm_object_fault_info_t)fault_info, io_streaming);
+
+       *start += object->paging_offset;
+
+       return (KERN_SUCCESS);
 }
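
The new memory_object_cluster_size() brackets the real work with a coordinate translation: *start arrives relative to the pager, is shifted into the object's space by subtracting paging_offset, and is shifted back before returning. The shape of that translate/operate/translate-back pattern, with a hypothetical clustering helper:

#include <stdint.h>

struct obj { uint64_t paging_offset; };  /* illustrative stand-in */

/* Hypothetical helper: pick a one-page cluster aligned around *start. */
static void cluster_in_object_space(struct obj *o, uint64_t *start, uint64_t *len)
{
        (void)o;
        *start &= ~(uint64_t)4095;       /* round down to a page boundary */
        *len = 4096;
}

static int cluster_size(struct obj *o, uint64_t *start, uint64_t *len)
{
        if (o == NULL || o->paging_offset > *start)
                return -1;               /* start not expressible in object space */
        *start -= o->paging_offset;      /* pager space -> object space */
        cluster_in_object_space(o, start, len);
        *start += o->paging_offset;      /* object space -> pager space */
        return 0;
}
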
 
+
 int vm_stat_discard_cleared_reply = 0;
 int vm_stat_discard_cleared_unset = 0;
 int vm_stat_discard_cleared_too_late = 0;
@@ -1667,11 +1698,12 @@ kern_return_t
 host_default_memory_manager(
        host_priv_t             host_priv,
        memory_object_default_t *default_manager,
-       memory_object_cluster_size_t cluster_size)
+       __unused memory_object_cluster_size_t cluster_size)
 {
        memory_object_default_t current_manager;
        memory_object_default_t new_manager;
        memory_object_default_t returned_manager;
+       kern_return_t result = KERN_SUCCESS;
 
        if (host_priv == HOST_PRIV_NULL)
                return(KERN_INVALID_HOST);
@@ -1679,16 +1711,36 @@ host_default_memory_manager(
        assert(host_priv == &realhost);
 
        new_manager = *default_manager;
-       mutex_lock(&memory_manager_default_lock);
+       lck_mtx_lock(&memory_manager_default_lock);
        current_manager = memory_manager_default;
+       returned_manager = MEMORY_OBJECT_DEFAULT_NULL;
 
        if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
                /*
                 *      Retrieve the current value.
                 */
-               memory_object_default_reference(current_manager);
                returned_manager = current_manager;
+               memory_object_default_reference(returned_manager);
        } else {
+               /*
+                *      Only allow the kernel to change the value.
+                */
+               extern task_t kernel_task;
+               if (current_task() != kernel_task) {
+                       result = KERN_NO_ACCESS;
+                       goto out;
+               }
+
+               /*
+                *      If this is the first non-null manager, start
+                *      up the internal pager support.
+                */
+               if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
+                       result = vm_pageout_internal_start();
+                       if (result != KERN_SUCCESS)
+                               goto out;
+               }
+
                /*
                 *      Retrieve the current value,
                 *      and replace it with the supplied value.
@@ -1696,33 +1748,32 @@ host_default_memory_manager(
                 *      but we have to take a reference on the new
                 *      one.
                 */
-
                returned_manager = current_manager;
                memory_manager_default = new_manager;
                memory_object_default_reference(new_manager);
 
-               if (cluster_size % PAGE_SIZE != 0) {
-#if 0
-                       mutex_unlock(&memory_manager_default_lock);
-                       return KERN_INVALID_ARGUMENT;
-#else
-                       cluster_size = round_page_32(cluster_size);
-#endif
-               }
-               memory_manager_default_cluster = cluster_size;
-
                /*
                 *      In case anyone's been waiting for a memory
                 *      manager to be established, wake them up.
                 */
 
                thread_wakeup((event_t) &memory_manager_default);
-       }
 
-       mutex_unlock(&memory_manager_default_lock);
+               /*
+                * Now that we have a default pager for anonymous memory,
+                * reactivate all the throttled pages (i.e. dirty pages with
+                * no pager).
+                */
+               if (current_manager == MEMORY_OBJECT_DEFAULT_NULL)
+               {
+                       vm_page_reactivate_all_throttled();
+               }
+       }
+ out:
+       lck_mtx_unlock(&memory_manager_default_lock);
 
        *default_manager = returned_manager;
-       return(KERN_SUCCESS);
+       return(result);
 }
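
The rewritten routine is a get-or-swap under a single mutex: a NULL argument reads the current manager, anything else (kernel-only, after the one-time pageout start-up) replaces it and wakes any waiters. A compressed pthread analogue of that control flow (all names illustrative):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t mgr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  mgr_cv   = PTHREAD_COND_INITIALIZER;
static void *default_mgr;                /* NULL until first installed */

/* If *mgr is NULL: return the current manager through *mgr.
 * Otherwise: install *mgr and hand back the previous one through *mgr. */
static void get_or_set_manager(void **mgr)
{
        pthread_mutex_lock(&mgr_lock);
        if (*mgr == NULL) {
                *mgr = default_mgr;              /* retrieve */
        } else {
                void *prev = default_mgr;        /* swap */
                default_mgr = *mgr;
                pthread_cond_broadcast(&mgr_cv); /* wake threads waiting below */
                *mgr = prev;
        }
        pthread_mutex_unlock(&mgr_lock);
}
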
 
 /*
@@ -1734,25 +1785,24 @@ host_default_memory_manager(
  */
 
 __private_extern__ memory_object_default_t
-memory_manager_default_reference(
-       memory_object_cluster_size_t *cluster_size)
+memory_manager_default_reference(void)
 {
        memory_object_default_t current_manager;
 
-       mutex_lock(&memory_manager_default_lock);
+       lck_mtx_lock(&memory_manager_default_lock);
        current_manager = memory_manager_default;
        while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
                wait_result_t res;
 
-               res = thread_sleep_mutex((event_t) &memory_manager_default,
-                                        &memory_manager_default_lock,
-                                        THREAD_UNINT);
+               res = lck_mtx_sleep(&memory_manager_default_lock,
+                                       LCK_SLEEP_DEFAULT,
+                                       (event_t) &memory_manager_default,
+                                       THREAD_UNINT);
                assert(res == THREAD_AWAKENED);
                current_manager = memory_manager_default;
        }
        memory_object_default_reference(current_manager);
-       *cluster_size = memory_manager_default_cluster;
-       mutex_unlock(&memory_manager_default_lock);
+       lck_mtx_unlock(&memory_manager_default_lock);
 
        return current_manager;
 }
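
The lck_mtx_sleep() loop above is the classic condition-wait: sleep with the lock released, and re-test the predicate after every wakeup before trusting it. The pthread rendering, reusing the variables from the previous sketch:

#include <pthread.h>
#include <stddef.h>

extern pthread_mutex_t mgr_lock;         /* from the sketch above */
extern pthread_cond_t  mgr_cv;
extern void *default_mgr;

static void *wait_for_manager(void)
{
        void *mgr;

        pthread_mutex_lock(&mgr_lock);
        while (default_mgr == NULL)      /* re-check after every wakeup */
                pthread_cond_wait(&mgr_cv, &mgr_lock);
        mgr = default_mgr;               /* capture while still locked */
        pthread_mutex_unlock(&mgr_lock);
        return mgr;
}
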
@@ -1774,18 +1824,18 @@ memory_manager_default_check(void)
 {
        memory_object_default_t current;
 
-       mutex_lock(&memory_manager_default_lock);
+       lck_mtx_lock(&memory_manager_default_lock);
        current = memory_manager_default;
        if (current == MEMORY_OBJECT_DEFAULT_NULL) {
                static boolean_t logged;        /* initialized to 0 */
                boolean_t       complain = !logged;
                logged = TRUE;
-               mutex_unlock(&memory_manager_default_lock);
+               lck_mtx_unlock(&memory_manager_default_lock);
                if (complain)
                        printf("Warning: No default memory manager\n");
                return(KERN_FAILURE);
        } else {
-               mutex_unlock(&memory_manager_default_lock);
+               lck_mtx_unlock(&memory_manager_default_lock);
                return(KERN_SUCCESS);
        }
 }
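
The static 'logged' flag above makes the warning fire exactly once, with the flip done under the lock so two racing callers cannot both print. Isolated into a sketch:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t once_lock = PTHREAD_MUTEX_INITIALIZER;

static void warn_no_manager(void)
{
        static int logged;               /* zero-initialized: nothing printed yet */
        int complain;

        pthread_mutex_lock(&once_lock);
        complain = !logged;              /* only the first caller complains */
        logged = 1;
        pthread_mutex_unlock(&once_lock);

        if (complain)
                printf("Warning: No default memory manager\n");
}
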
@@ -1794,7 +1844,7 @@ __private_extern__ void
 memory_manager_default_init(void)
 {
        memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
-       mutex_init(&memory_manager_default_lock, 0);
+       lck_mtx_init(&memory_manager_default_lock, &vm_object_lck_grp, &vm_object_lck_attr);
 }
 
 
@@ -1811,152 +1861,12 @@ memory_object_page_op(
        int                     *flags)
 {
        vm_object_t             object;
-       vm_page_t               dst_page;
-
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL)
                return (KERN_INVALID_ARGUMENT);
 
-       vm_object_lock(object);
-
-       if(ops & UPL_POP_PHYSICAL) {
-               if(object->phys_contiguous) {
-                       if (phys_entry) {
-                               *phys_entry = (ppnum_t)
-                                       (object->shadow_offset >> 12);
-                       }
-                       vm_object_unlock(object);
-                       return KERN_SUCCESS;
-               } else {
-                       vm_object_unlock(object);
-                       return KERN_INVALID_OBJECT;
-               }
-       }
-       if(object->phys_contiguous) {
-               vm_object_unlock(object);
-               return KERN_INVALID_OBJECT;
-       }
-
-       while(TRUE) {
-               if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
-                       vm_object_unlock(object);
-                       return KERN_FAILURE;
-               }
-
-               /* Sync up on getting the busy bit */
-               if((dst_page->busy || dst_page->cleaning) && 
-                          (((ops & UPL_POP_SET) && 
-                          (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
-                       /* someone else is playing with the page, we will */
-                       /* have to wait */
-                       PAGE_SLEEP(object, dst_page, THREAD_UNINT);
-                       continue;
-               }
-
-               if (ops & UPL_POP_DUMP) {
-                       vm_page_lock_queues();
-
-                       if (dst_page->no_isync == FALSE)
-                               pmap_disconnect(dst_page->phys_page);
-                       vm_page_free(dst_page);
-
-                       vm_page_unlock_queues();
-                       break;
-               }
-
-               if (flags) {
-                       *flags = 0;
-
-                       /* Get the condition of flags before requested ops */
-                       /* are undertaken */
-
-                       if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
-                       if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
-                       if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
-                       if(dst_page->absent) *flags |= UPL_POP_ABSENT;
-                       if(dst_page->busy) *flags |= UPL_POP_BUSY;
-               }
-
-               /* The caller should have made a call either contingent with */
-               /* or prior to this call to set UPL_POP_BUSY */
-               if(ops & UPL_POP_SET) {
-                       /* The protection granted with this assert will */
-                       /* not be complete.  If the caller violates the */
-                       /* convention and attempts to change page state */
-                       /* without first setting busy we may not see it */
-                       /* because the page may already be busy.  However */
-                       /* if such violations occur we will assert sooner */
-                       /* or later. */
-                       assert(dst_page->busy || (ops & UPL_POP_BUSY));
-                       if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
-                       if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
-                       if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
-                       if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
-                       if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
-               }
-
-               if(ops & UPL_POP_CLR) {
-                       assert(dst_page->busy);
-                       if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
-                       if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
-                       if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
-                       if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
-                       if (ops & UPL_POP_BUSY) {
-                               dst_page->busy = FALSE;
-                               PAGE_WAKEUP(dst_page);
-                       }
-               }
-
-               if (dst_page->encrypted) {
-                       /*
-                        * ENCRYPTED SWAP:
-                        * We need to decrypt this encrypted page before the
-                        * caller can access its contents.
-                        * But if the caller really wants to access the page's
-                        * contents, they have to keep the page "busy".
-                        * Otherwise, the page could get recycled or re-encrypted
-                        * at any time.
-                        */
-                       if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
-                           dst_page->busy) {
-                               /*
-                                * The page is stable enough to be accessed by
-                                * the caller, so make sure its contents are
-                                * not encrypted.
-                                */
-                               vm_page_decrypt(dst_page, 0);
-                       } else {
-                               /*
-                                * The page is not busy, so don't bother
-                                * decrypting it, since anything could
-                                * happen to it between now and when the
-                                * caller wants to access it.
-                                * We should not give the caller access
-                                * to this page.
-                                */
-                               assert(!phys_entry);
-                       }
-               }
-
-               if (phys_entry) {
-                       /*
-                        * The physical page number will remain valid
-                        * only if the page is kept busy.
-                        * ENCRYPTED SWAP: make sure we don't let the
-                        * caller access an encrypted page.
-                        */
-                       assert(dst_page->busy);
-                       assert(!dst_page->encrypted);
-                       *phys_entry = dst_page->phys_page;
-               }
-
-               break;
-       }
-
-       vm_object_unlock(object);
-       return KERN_SUCCESS;
-                               
+       return vm_object_page_op(object, offset, ops, phys_entry, flags);
 }
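
The body deleted above (the logic now lives behind vm_object_page_op()) was built around one idiom: if the target page is busy, sleep on it and redo the lookup from scratch, because by the time we wake the page may have been freed or may even belong to a different object. A pthread sketch of that retry loop (the page type and lookup are hypothetical):

#include <pthread.h>
#include <stddef.h>

struct page { int busy; };

extern pthread_mutex_t obj_lock;               /* object lock (sketch) */
extern pthread_cond_t  page_wakeup;            /* broadcast when a page un-busies */
extern struct page *page_lookup(long offset);  /* hypothetical lookup */

static struct page *grab_page(long offset)
{
        struct page *p;

        pthread_mutex_lock(&obj_lock);
        for (;;) {
                p = page_lookup(offset);
                if (p == NULL)
                        break;                 /* no page: caller sees failure */
                if (!p->busy) {
                        p->busy = 1;           /* claim it ourselves */
                        break;
                }
                /* Someone else owns the page: sleep, then look it up
                 * again, since it may be gone or reused by wakeup time. */
                pthread_cond_wait(&page_wakeup, &obj_lock);
        }
        pthread_mutex_unlock(&obj_lock);
        return p;
}
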
 
 /*
@@ -1977,73 +1887,103 @@ memory_object_range_op(
        int                     ops,
        int                     *range)
 {
-        memory_object_offset_t offset;
        vm_object_t             object;
-       vm_page_t               dst_page;
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL)
                return (KERN_INVALID_ARGUMENT);
 
-       if (object->resident_page_count == 0) {
-               if (range) {
-                       if (ops & UPL_ROP_PRESENT)
-                               *range = 0;
-                       else
-                               *range = offset_end - offset_beg;
-               }
-               return KERN_SUCCESS;
-       }
-       vm_object_lock(object);
+       return vm_object_range_op(object,
+                                 offset_beg,
+                                 offset_end,
+                                 ops,
+                                 (uint32_t *) range);
+}
+
+
+void
+memory_object_mark_used(
+        memory_object_control_t        control)
+{
+       vm_object_t             object;
+
+       if (control == NULL)
+               return;
+
+       object = memory_object_control_to_vm_object(control);
+
+       if (object != VM_OBJECT_NULL)
+               vm_object_cache_remove(object);
+}
+
+
+void
+memory_object_mark_unused(
+       memory_object_control_t control,
+       __unused boolean_t      rage)
+{
+       vm_object_t             object;
+
+       if (control == NULL)
+               return;
+
+       object = memory_object_control_to_vm_object(control);
+
+       if (object != VM_OBJECT_NULL)
+               vm_object_cache_add(object);
+}
 
-       if (object->phys_contiguous) {
+void
+memory_object_mark_io_tracking(
+       memory_object_control_t control)
+{
+       vm_object_t             object;
+
+       if (control == NULL)
+               return;
+       object = memory_object_control_to_vm_object(control);
+
+       if (object != VM_OBJECT_NULL) {
+               vm_object_lock(object);
+               object->io_tracking = TRUE;
                vm_object_unlock(object);
-               return KERN_INVALID_OBJECT;
        }
-       
-       offset = offset_beg;
+}
 
-       while (offset < offset_end) {
-               dst_page = vm_page_lookup(object, offset);
-               if (dst_page != VM_PAGE_NULL) {
-                       if (ops & UPL_ROP_DUMP) {
-                               if (dst_page->busy || dst_page->cleaning) {
-                                       /*
-                                        * someone else is playing with the 
-                                        * page, we will have to wait
-                                        */
-                                       PAGE_SLEEP(object, 
-                                               dst_page, THREAD_UNINT);
-                                       /*
-                                        * need to look the page up again since its
-                                        * state may have changed while we slept;
-                                        * it might even belong to a different object
-                                        * at this point
-                                        */
-                                       continue;
-                               }
-                               vm_page_lock_queues();
+#if CONFIG_SECLUDED_MEMORY
+void
+memory_object_mark_eligible_for_secluded(
+       memory_object_control_t control,
+       boolean_t               eligible_for_secluded)
+{
+       vm_object_t             object;
 
-                               if (dst_page->no_isync == FALSE)
-                                       pmap_disconnect(dst_page->phys_page);
-                               vm_page_free(dst_page);
+       if (control == NULL)
+               return;
+       object = memory_object_control_to_vm_object(control);
 
-                               vm_page_unlock_queues();
-                       } else if (ops & UPL_ROP_ABSENT)
-                               break;
-               } else if (ops & UPL_ROP_PRESENT)
-                       break;
+       if (object == VM_OBJECT_NULL) {
+               return;
+       }
 
-               offset += PAGE_SIZE;
+       vm_object_lock(object);
+       if (eligible_for_secluded &&
+           secluded_for_filecache && /* global boot-arg */
+           !object->eligible_for_secluded) {
+               object->eligible_for_secluded = TRUE;
+               vm_page_secluded.eligible_for_secluded += object->resident_page_count;
+       } else if (!eligible_for_secluded &&
+                  object->eligible_for_secluded) {
+               object->eligible_for_secluded = FALSE;
+               vm_page_secluded.eligible_for_secluded -= object->resident_page_count;
+               if (object->resident_page_count) {
+                       /* XXX FBDP TODO: flush pages from secluded queue? */
+                       // printf("FBDP TODO: flush %d pages from %p from secluded queue\n", object->resident_page_count, object);
+               }
        }
        vm_object_unlock(object);
-
-       if (range)
-               *range = offset - offset_beg;
-
-       return KERN_SUCCESS;
 }
-
+#endif /* CONFIG_SECLUDED_MEMORY */
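
memory_object_mark_eligible_for_secluded() keeps a global page tally in lock-step with a per-object flag: the object's resident count is added when the flag turns on and subtracted when it turns off, and both updates happen under the object lock so flag and tally cannot drift apart. Distilled:

struct sec_obj { int eligible; unsigned resident_pages; };

static unsigned long eligible_page_tally;      /* global counter (sketch) */

/* Caller must hold the object's lock. */
static void set_eligible(struct sec_obj *o, int eligible)
{
        if (eligible && !o->eligible) {
                o->eligible = 1;
                eligible_page_tally += o->resident_pages;
        } else if (!eligible && o->eligible) {
                o->eligible = 0;
                eligible_page_tally -= o->resident_pages;
        }
        /* no-op when the flag already matches: the tally stays balanced */
}
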
 
 kern_return_t
 memory_object_pages_resident(
@@ -2064,6 +2004,54 @@ memory_object_pages_resident(
        return (KERN_SUCCESS);
 }
 
+kern_return_t
+memory_object_signed(
+       memory_object_control_t control,
+       boolean_t               is_signed)
+{
+       vm_object_t     object;
+
+       object = memory_object_control_to_vm_object(control);
+       if (object == VM_OBJECT_NULL)
+               return KERN_INVALID_ARGUMENT;
+
+       vm_object_lock(object);
+       object->code_signed = is_signed;
+       vm_object_unlock(object);
+
+       return KERN_SUCCESS;
+}
+
+boolean_t
+memory_object_is_signed(
+       memory_object_control_t control)
+{
+       boolean_t       is_signed;
+       vm_object_t     object;
+
+       object = memory_object_control_to_vm_object(control);
+       if (object == VM_OBJECT_NULL)
+               return FALSE;
+
+       vm_object_lock_shared(object);
+       is_signed = object->code_signed;
+       vm_object_unlock(object);
+
+       return is_signed;
+}
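
Note the lock asymmetry in the pair above: the setter takes the object lock exclusively, while the getter uses vm_object_lock_shared() because reading one bit needs no exclusion against other readers. The same split with a pthread read-write lock (sketch):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t obj_rw = PTHREAD_RWLOCK_INITIALIZER;
static bool code_signed;

static void set_signed(bool v)
{
        pthread_rwlock_wrlock(&obj_rw);  /* exclusive: we mutate */
        code_signed = v;
        pthread_rwlock_unlock(&obj_rw);
}

static bool is_signed(void)
{
        bool v;

        pthread_rwlock_rdlock(&obj_rw);  /* shared: read-only */
        v = code_signed;
        pthread_rwlock_unlock(&obj_rw);
        return v;
}
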
+
+boolean_t
+memory_object_is_slid(
+       memory_object_control_t control)
+{
+       vm_object_t     object = VM_OBJECT_NULL;
+
+       object = memory_object_control_to_vm_object(control);
+       if (object == VM_OBJECT_NULL)
+               return FALSE;
+
+       return object->object_slid;
+}
 
 static zone_t mem_obj_control_zone;
 
@@ -2074,6 +2062,8 @@ memory_object_control_bootstrap(void)
 
        i = (vm_size_t) sizeof (struct memory_object_control);
        mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
+       zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE);
+       zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE);
        return;
 }
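
mem_obj_control_zone is a fixed-size-object zone: zinit() sizes it for struct memory_object_control, and the added zone_change() calls opt it out of per-caller accounting (Z_CALLERACCT) and keep its pages out of encrypted hibernation images (Z_NOENCRYPT). A toy free-list allocator showing what such a zone provides (a sketch, not the kernel's zalloc):

#include <stdlib.h>

/* Elements must be at least pointer-sized: a free element's first
 * word links to the next free element. */
struct zone { void *free_list; size_t elem_size; };

static void *zone_alloc(struct zone *z)
{
        void *e = z->free_list;
        if (e != NULL)
                z->free_list = *(void **)e;    /* pop from the free list */
        else
                e = malloc(z->elem_size);      /* grow (the kernel grabs pages) */
        return e;
}

static void zone_free(struct zone *z, void *e)
{
        *(void **)e = z->free_list;            /* push back on the free list */
        z->free_list = e;
}
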
 
@@ -2084,8 +2074,10 @@ memory_object_control_allocate(
        memory_object_control_t control;
 
        control = (memory_object_control_t)zalloc(mem_obj_control_zone);
-       if (control != MEMORY_OBJECT_CONTROL_NULL)
-               control->object = object;
+       if (control != MEMORY_OBJECT_CONTROL_NULL) {
+               control->moc_object = object;
+               control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */
+       }
        return (control);
 }
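
The moc_ikot field is what allowed memory_object_iopl_request() above to take a raw port pointer: the control structure is laid out so the word inspected by ip_kotype() reads IKOT_MEM_OBJ_CONTROL, and memory_object_control_to_vm_object() re-checks that tag before trusting the cast. The general shape of such a tagged downcast (names illustrative):

#include <stddef.h>

enum kind { KIND_PORT = 1, KIND_CONTROL = 2 };

/* Both layouts begin with the same tag word, so any pointer can be
 * inspected through the common prefix before it is downcast. */
struct tagged  { int kind; };
struct control { int kind; void *object; };    /* moc_ikot / moc_object analogue */

static struct control *to_control(void *p)
{
        if (p == NULL || ((struct tagged *)p)->kind != KIND_CONTROL)
                return NULL;                   /* wrong kind: refuse the cast */
        return (struct control *)p;
}
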
 
@@ -2094,19 +2086,20 @@ memory_object_control_collapse(
        memory_object_control_t control,                       
        vm_object_t             object)
 {                     
-       assert((control->object != VM_OBJECT_NULL) &&
-              (control->object != object));
-       control->object = object;
+       assert((control->moc_object != VM_OBJECT_NULL) &&
+              (control->moc_object != object));
+       control->moc_object = object;
 }
 
 __private_extern__ vm_object_t
 memory_object_control_to_vm_object(
        memory_object_control_t control)
 {
-       if (control == MEMORY_OBJECT_CONTROL_NULL)
+       if (control == MEMORY_OBJECT_CONTROL_NULL ||
+           control->moc_ikot != IKOT_MEM_OBJ_CONTROL)
                return VM_OBJECT_NULL;
 
-       return (control->object);
+       return (control->moc_object);
 }
 
 memory_object_control_t
@@ -2147,8 +2140,8 @@ void
 memory_object_control_disable(
        memory_object_control_t control)
 {
-       assert(control->object != VM_OBJECT_NULL);
-       control->object = VM_OBJECT_NULL;
+       assert(control->moc_object != VM_OBJECT_NULL);
+       control->moc_object = VM_OBJECT_NULL;
 }
 
 void
@@ -2185,30 +2178,16 @@ convert_memory_object_to_port(
 void memory_object_reference(
        memory_object_t memory_object)
 {
-
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               vnode_pager_reference(memory_object);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               device_pager_reference(memory_object);
-       } else
-#endif
-               dp_memory_object_reference(memory_object);
+       (memory_object->mo_pager_ops->memory_object_reference)(
+               memory_object);
 }
 
 /* Routine memory_object_deallocate */
 void memory_object_deallocate(
        memory_object_t memory_object)
 {
-
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               vnode_pager_deallocate(memory_object);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               device_pager_deallocate(memory_object);
-       } else
-#endif
-               dp_memory_object_deallocate(memory_object);
+       (memory_object->mo_pager_ops->memory_object_deallocate)(
+                memory_object);
 }
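
From here down, every pager entry point collapses to one indirect call through mo_pager_ops, replacing the old hardwired chain of vnode/device/default-pager tests. The ops-vector pattern in miniature (a sketch; the real vector is declared in the pager headers):

#include <stdio.h>

/* Per-pager operations vector: one function pointer per entry point. */
struct pager_ops {
        const char *name;
        void (*reference)(void *self);
        void (*deallocate)(void *self);
};

/* Every memory object leads with a pointer to its pager's ops. */
struct mem_obj { const struct pager_ops *ops; };

static void vnode_ref(void *self)     { (void)self; puts("vnode ref"); }
static void vnode_dealloc(void *self) { (void)self; puts("vnode dealloc"); }

static const struct pager_ops vnode_ops = {
        "vnode pager", vnode_ref, vnode_dealloc,
};

static void obj_reference(struct mem_obj *o)
{
        (o->ops->reference)(o);          /* dispatch: no per-pager if/else */
}

int main(void)
{
        struct mem_obj o = { &vnode_ops };
        obj_reference(&o);               /* prints "vnode ref" */
        return 0;
}
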
 
 
@@ -2220,20 +2199,10 @@ kern_return_t memory_object_init
        memory_object_cluster_size_t memory_object_page_size
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_init(memory_object,
-                                       memory_control,
-                                       memory_object_page_size);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               return device_pager_init(memory_object,
-                                        memory_control,
-                                        memory_object_page_size);
-       } else
-#endif
-               return dp_memory_object_init(memory_object,
-                                            memory_control,
-                                            memory_object_page_size);
+       return (memory_object->mo_pager_ops->memory_object_init)(
+               memory_object,
+               memory_control,
+               memory_object_page_size);
 }
 
 /* Routine memory_object_terminate */
@@ -2242,14 +2211,8 @@ kern_return_t memory_object_terminate
        memory_object_t memory_object
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_terminate(memory_object);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               return device_pager_terminate(memory_object);
-       } else
-#endif
-               return dp_memory_object_terminate(memory_object);
+       return (memory_object->mo_pager_ops->memory_object_terminate)(
+               memory_object);
 }
 
 /* Routine memory_object_data_request */
@@ -2258,26 +2221,16 @@ kern_return_t memory_object_data_request
        memory_object_t memory_object,
        memory_object_offset_t offset,
        memory_object_cluster_size_t length,
-       vm_prot_t desired_access
+       vm_prot_t desired_access,
+       memory_object_fault_info_t fault_info
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_data_request(memory_object, 
-                                               offset, 
-                                               length,
-                                               desired_access);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               return device_pager_data_request(memory_object, 
-                                                offset, 
-                                                length,
-                                                desired_access);
-       } else
-#endif
-               return dp_memory_object_data_request(memory_object, 
-                                                    offset, 
-                                                    length,
-                                                    desired_access);
+       return (memory_object->mo_pager_ops->memory_object_data_request)(
+               memory_object,
+               offset, 
+               length,
+               desired_access,
+               fault_info);
 }
 
 /* Routine memory_object_data_return */
@@ -2285,7 +2238,7 @@ kern_return_t memory_object_data_return
 (
        memory_object_t memory_object,
        memory_object_offset_t offset,
-       vm_size_t size,
+       memory_object_cluster_size_t size,
        memory_object_offset_t *resid_offset,
        int     *io_error,
        boolean_t dirty,
@@ -2293,37 +2246,15 @@ kern_return_t memory_object_data_return
        int     upl_flags
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_data_return(memory_object,
-                                              offset,
-                                              size,
-                                              resid_offset,
-                                              io_error,
-                                              dirty,
-                                              kernel_copy,
-                                              upl_flags);
-       } else if (memory_object->pager == &device_pager_workaround) {
-
-               return device_pager_data_return(memory_object,
-                                               offset,
-                                               size,
-                                               dirty,
-                                               kernel_copy,
-                                               upl_flags);
-       }
-       else 
-#endif
-       {
-               return dp_memory_object_data_return(memory_object,
-                                                   offset,
-                                                   size,
-                                                   NULL,
-                                                   NULL,
-                                                   dirty,
-                                                   kernel_copy,
-                                                   upl_flags);
-       }
+       return (memory_object->mo_pager_ops->memory_object_data_return)(
+               memory_object,
+               offset,
+               size,
+               resid_offset,
+               io_error,
+               dirty,
+               kernel_copy,
+               upl_flags);
 }
 
 /* Routine memory_object_data_initialize */
@@ -2331,23 +2262,13 @@ kern_return_t memory_object_data_initialize
 (
        memory_object_t memory_object,
        memory_object_offset_t offset,
-       vm_size_t size
+       memory_object_cluster_size_t size
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_data_initialize(memory_object,
-                                                  offset,
-                                                  size);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               return device_pager_data_initialize(memory_object,
-                                                   offset,
-                                                   size);
-       } else
-#endif
-               return dp_memory_object_data_initialize(memory_object,
-                                                       offset,
-                                                       size);
+       return (memory_object->mo_pager_ops->memory_object_data_initialize)(
+               memory_object,
+               offset,
+               size);
 }
 
 /* Routine memory_object_data_unlock */
@@ -2355,27 +2276,15 @@ kern_return_t memory_object_data_unlock
 (
        memory_object_t memory_object,
        memory_object_offset_t offset,
-       vm_size_t size,
+       memory_object_size_t size,
        vm_prot_t desired_access
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_data_unlock(memory_object,
-                                              offset,
-                                              size,
-                                              desired_access);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               return device_pager_data_unlock(memory_object,
-                                               offset,
-                                               size,
-                                               desired_access);
-       } else
-#endif
-               return dp_memory_object_data_unlock(memory_object,
-                                                   offset,
-                                                   size,
-                                                   desired_access);
+       return (memory_object->mo_pager_ops->memory_object_data_unlock)(
+               memory_object,
+               offset,
+               size,
+               desired_access);
 }
 
 /* Routine memory_object_synchronize */
@@ -2383,56 +2292,70 @@ kern_return_t memory_object_synchronize
 (
        memory_object_t memory_object,
        memory_object_offset_t offset,
-       vm_size_t size,
+       memory_object_size_t size,
        vm_sync_t sync_flags
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_synchronize(memory_object,
-                                              offset,
-                                              size,
-                                              sync_flags);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               return device_pager_synchronize(memory_object,
-                                               offset,
-                                               size,
-                                               sync_flags);
-       } else
-#endif
-               return dp_memory_object_synchronize(memory_object,
-                                                   offset,
-                                                   size,
-                                                   sync_flags);
+       return (memory_object->mo_pager_ops->memory_object_synchronize)(
+               memory_object,
+               offset,
+               size,
+               sync_flags);
+}
+
+
+/*
+ * memory_object_map() is called by VM (in vm_map_enter() and its variants)
+ * each time a "named" VM object gets mapped directly or indirectly
+ * (copy-on-write mapping).  A "named" VM object has an extra reference held
+ * by the pager to keep it alive until the pager decides that the 
+ * memory object (and its VM object) can be reclaimed.
+ * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all
+ * the mappings of that memory object have been removed.
+ *
+ * For a given VM object, calls to memory_object_map() and memory_object_last_unmap()
+ * are serialized (through object->mapping_in_progress), to ensure that the
+ * pager gets a consistent view of the mapping status of the memory object.
+ *
+ * This allows the pager to keep track of how many times a memory object
+ * has been mapped and with which protections, to decide when it can be
+ * reclaimed.
+ */
+
+/* Routine memory_object_map */
+kern_return_t memory_object_map
+(
+       memory_object_t memory_object,
+       vm_prot_t prot
+)
+{
+       return (memory_object->mo_pager_ops->memory_object_map)(
+               memory_object,
+               prot);
 }
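
The contract spelled out in the comment block can be restated as a small counting protocol: the pager counts map events and, on the last-unmap callback, may decide the object is reclaimable. Because the VM serializes the two calls per object (mapping_in_progress), this sketch needs no lock of its own:

struct named_obj {
        unsigned map_count;      /* how many times currently mapped */
        int      reclaimable;    /* pager's verdict */
};

/* Called by the VM on each new mapping (serialized per object). */
static void on_map(struct named_obj *o)
{
        o->map_count++;
        o->reclaimable = 0;      /* in use again */
}

/* Called by the VM once all mappings are gone (serialized per object). */
static void on_last_unmap(struct named_obj *o)
{
        o->map_count = 0;
        o->reclaimable = 1;      /* pager may drop its extra reference now */
}
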
 
-/* Routine memory_object_unmap */
-kern_return_t memory_object_unmap
+/* Routine memory_object_last_unmap */
+kern_return_t memory_object_last_unmap
 (
        memory_object_t memory_object
 )
 {
-#ifdef MACH_BSD
-       if (memory_object->pager == &vnode_pager_workaround) {
-               return vnode_pager_unmap(memory_object);
-       } else if (memory_object->pager == &device_pager_workaround) {
-               return device_pager_unmap(memory_object);
-       } else
-#endif
-               return dp_memory_object_unmap(memory_object);
+       return (memory_object->mo_pager_ops->memory_object_last_unmap)(
+               memory_object);
 }
 
-/* Routine memory_object_create */
-kern_return_t memory_object_create
+/* Routine memory_object_data_reclaim */
+kern_return_t memory_object_data_reclaim
 (
-       memory_object_default_t default_memory_manager,
-       vm_size_t new_memory_object_size,
-       memory_object_t *new_memory_object
+       memory_object_t memory_object,
+       boolean_t       reclaim_backing_store
 )
 {
-       return default_pager_memory_object_create(default_memory_manager,
-                                                 new_memory_object_size,
-                                                 new_memory_object);
+       if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL)
+               return KERN_NOT_SUPPORTED;
+       return (memory_object->mo_pager_ops->memory_object_data_reclaim)(
+               memory_object,
+               reclaim_backing_store);
 }
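
data_reclaim is the one optional slot in the ops vector: a pager that predates it leaves the pointer NULL, and the dispatcher above answers KERN_NOT_SUPPORTED rather than calling through NULL. The guard in isolation (sketch):

#include <stddef.h>

#define NOT_SUPPORTED (-1)

struct ops_v2 { int (*data_reclaim)(void *self, int backing); };

static int try_reclaim(const struct ops_v2 *ops, void *self, int backing)
{
        if (ops->data_reclaim == NULL)   /* optional op not implemented */
                return NOT_SUPPORTED;
        return (ops->data_reclaim)(self, backing);
}
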
 
 upl_t