diff --git a/osfmk/vm/bsd_vm.c b/osfmk/vm/bsd_vm.c
index cd8dc83173b6daa18a65d8ccf5847cc0595e31a8..af3985f95ce6a01232588d7080e7c0d5eb590127 100644
--- a/osfmk/vm/bsd_vm.c
+++ b/osfmk/vm/bsd_vm.c
 
 #include <kern/assert.h>
 #include <kern/host.h>
+#include <kern/ledger.h>
 #include <kern/thread.h>
+#include <kern/ipc_kobject.h>
 
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_space.h>
 
-#include <default_pager/default_pager_types.h>
-#include <default_pager/default_pager_object_server.h>
-
 #include <vm/vm_map.h>
 #include <vm/vm_pageout.h>
 #include <vm/memory_object.h>
@@ -113,18 +112,14 @@ const struct memory_object_pager_ops vnode_pager_ops = {
 };
 
 typedef struct vnode_pager {
-       struct ipc_object_header        pager_header;   /* fake ip_kotype()             */
-       memory_object_pager_ops_t pager_ops;    /* == &vnode_pager_ops       */
+       /* mandatory generic header */
+       struct memory_object vn_pgr_hdr;
+
+       /* pager-specific */
        unsigned int            ref_count;      /* reference count           */
-       memory_object_control_t control_handle; /* mem object control handle */
        struct vnode            *vnode_handle;  /* vnode handle              */
 } *vnode_pager_t;
 
-#define pager_ikot pager_header.io_bits
-
-ipc_port_t
-trigger_name_to_port(                  /* forward */
-       mach_port_t);
 
 kern_return_t
 vnode_pager_cluster_read(              /* forward */
@@ -152,6 +147,10 @@ vnode_pager_t
 vnode_pager_lookup(                    /* forward */
        memory_object_t);
 
+struct vnode *
+vnode_pager_lookup_vnode(              /* forward */
+       memory_object_t);
+
 zone_t vnode_pager_zone;
 
 
@@ -182,175 +181,6 @@ extern int proc_resetpcontrol(int);
 extern unsigned long vm_cs_validated_resets;
 #endif
 
-/*
- *     Routine:        mach_macx_triggers
- *     Function:
- *             Syscall interface to set the call backs for low and
- *             high water marks.
- */
-int
-mach_macx_triggers(
-       struct macx_triggers_args *args)
-{
-       int     hi_water = args->hi_water;
-       int     low_water = args->low_water;
-       int     flags = args->flags;
-       mach_port_t     trigger_name = args->alert_port;
-       kern_return_t kr;
-       memory_object_default_t default_pager;
-       ipc_port_t              trigger_port;
-
-       default_pager = MEMORY_OBJECT_DEFAULT_NULL;
-       kr = host_default_memory_manager(host_priv_self(), 
-                                       &default_pager, 0);
-       if(kr != KERN_SUCCESS) {
-               return EINVAL;
-       }
-
-       if (((flags & SWAP_ENCRYPT_ON) && (flags & SWAP_ENCRYPT_OFF)) || 
-           ((flags & SWAP_COMPACT_ENABLE) && (flags & SWAP_COMPACT_DISABLE))) {
-               /* can't have it both ways */
-               return EINVAL;
-       }
-
-       if (default_pager_init_flag == 0) {
-               start_def_pager(NULL);
-               default_pager_init_flag = 1;
-       }
-
-       if (flags & SWAP_ENCRYPT_ON) {
-               /* ENCRYPTED SWAP: tell default_pager to encrypt */
-               default_pager_triggers(default_pager,
-                                      0, 0,
-                                      SWAP_ENCRYPT_ON,
-                                      IP_NULL);
-       } else if (flags & SWAP_ENCRYPT_OFF) {
-               /* ENCRYPTED SWAP: tell default_pager not to encrypt */
-               default_pager_triggers(default_pager,
-                                      0, 0,
-                                      SWAP_ENCRYPT_OFF,
-                                      IP_NULL);
-       }
-
-       if (flags & USE_EMERGENCY_SWAP_FILE_FIRST) {
-               /*
-                * Time to switch to the emergency segment.
-                */
-               return default_pager_triggers(default_pager,
-                                       0, 0, 
-                                       USE_EMERGENCY_SWAP_FILE_FIRST,
-                                       IP_NULL);
-       }
-
-       if (flags & SWAP_FILE_CREATION_ERROR) {
-               /* 
-                * For some reason, the dynamic pager failed to create a swap file.
-                */
-               trigger_port = trigger_name_to_port(trigger_name);
-               if(trigger_port == NULL) {
-                       return EINVAL;
-               }
-               /* trigger_port is locked and active */
-               ipc_port_make_send_locked(trigger_port); 
-               /* now unlocked */
-               default_pager_triggers(default_pager,
-                                       0, 0, 
-                                       SWAP_FILE_CREATION_ERROR,
-                                       trigger_port);
-       }
-
-       if (flags & HI_WAT_ALERT) {
-               trigger_port = trigger_name_to_port(trigger_name);
-               if(trigger_port == NULL) {
-                       return EINVAL;
-               }
-               /* trigger_port is locked and active */
-               ipc_port_make_send_locked(trigger_port); 
-               /* now unlocked */
-               default_pager_triggers(default_pager, 
-                                      hi_water, low_water,
-                                      HI_WAT_ALERT, trigger_port);
-       }
-
-       if (flags & LO_WAT_ALERT) {
-               trigger_port = trigger_name_to_port(trigger_name);
-               if(trigger_port == NULL) {
-                       return EINVAL;
-               }
-               /* trigger_port is locked and active */
-               ipc_port_make_send_locked(trigger_port);
-               /* and now its unlocked */
-               default_pager_triggers(default_pager, 
-                                      hi_water, low_water,
-                                      LO_WAT_ALERT, trigger_port);
-       }
-
-
-       if (flags & PROC_RESUME) {
-
-               /*
-                * For this call, hi_water is used to pass in the pid of the process we want to resume
-                * or unthrottle.  This is of course restricted to the superuser (checked inside of 
-                * proc_resetpcontrol).
-                */
-
-               return proc_resetpcontrol(hi_water);
-       }
-
-       /*
-        * Set thread scheduling priority and policy for the current thread
-        * it is assumed for the time being that the thread setting the alert
-        * is the same one which will be servicing it.
-        *
-        * XXX This does not belong in the kernel XXX
-        */
-       if (flags & HI_WAT_ALERT) {
-               thread_precedence_policy_data_t         pre;
-               thread_extended_policy_data_t           ext;
-
-               ext.timeshare = FALSE;
-               pre.importance = INT32_MAX;
-
-               thread_policy_set(current_thread(),
-                                 THREAD_EXTENDED_POLICY,
-                                 (thread_policy_t)&ext,
-                                 THREAD_EXTENDED_POLICY_COUNT);
-
-               thread_policy_set(current_thread(),
-                                 THREAD_PRECEDENCE_POLICY,
-                                 (thread_policy_t)&pre,
-                                 THREAD_PRECEDENCE_POLICY_COUNT);
-
-               current_thread()->options |= TH_OPT_VMPRIV;
-       }
-       if (flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE)) {
-               return macx_backing_store_compaction(flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE));
-       }
-
-       return 0;
-}
-
-/*
- *
- */
-ipc_port_t
-trigger_name_to_port(
-       mach_port_t     trigger_name)
-{
-       ipc_port_t      trigger_port;
-       ipc_space_t     space;
-
-       if (trigger_name == 0)
-               return (NULL);
-
-       space  = current_space();
-       if(ipc_port_translate_receive(space, CAST_MACH_PORT_TO_NAME(trigger_name), 
-                                               &trigger_port) != KERN_SUCCESS)
-               return (NULL);
-       return trigger_port;
-}
-
 
 extern int     uiomove64(addr64_t, int, void *);
 #define        MAX_RUN 32
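The two routines deleted above were the kernel half of the dynamic_pager(8) interface: trigger_name_to_port() translated a Mach port name from the caller's IPC space into a port pointer, and mach_macx_triggers() registered that port as the alert channel for the default pager's high/low water marks. With the default pager gone, the syscall plumbing goes with it. A hedged sketch of how a pager daemon would have registered an alert port; the userland macx_triggers() prototype is inferred from the macx_triggers_args struct above, not quoted from a header:

    #include <mach/mach.h>

    /* hypothetical prototype, reconstructed from struct macx_triggers_args */
    extern int macx_triggers(int hi_water, int low_water, int flags,
                             mach_port_t alert_port);

    static int
    register_hi_wat_alert(void)
    {
            mach_port_t alert = MACH_PORT_NULL;

            /* the kernel side called ipc_port_translate_receive(), so the
             * name passed down had to denote a receive right in this task */
            if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
                                   &alert) != KERN_SUCCESS)
                    return -1;

            /* 90%/50% water marks; HI_WAT_ALERT as handled by the deleted code */
            return macx_triggers(90, 50, HI_WAT_ALERT, alert);
    }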
@@ -374,6 +204,7 @@ memory_object_control_uiomove(
        int                     i;
        int                     orig_offset;
        vm_page_t               page_run[MAX_RUN];
+       int                     dirty_count;    /* keeps track of number of pages dirtied as part of this uiomove */
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL) {
@@ -394,76 +225,22 @@ memory_object_control_uiomove(
                return 0;
        }
        orig_offset = start_offset;
-           
+
+       dirty_count = 0;        
        while (io_requested && retval == 0) {
 
                cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;
 
                if (cur_needed > MAX_RUN)
                        cur_needed = MAX_RUN;
-
+               
                for (cur_run = 0; cur_run < cur_needed; ) {
 
                        if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
                                break;
 
-                       /*
-                        * if we're in this routine, we are inside a filesystem's
-                        * locking model, so we don't ever want to wait for pages that have
-                        * list_req_pending == TRUE since it means that the
-                        * page is a candidate for some type of I/O operation,
-                        * but that it has not yet been gathered into a UPL...
-                        * this implies that it is still outside the domain
-                        * of the filesystem and that whoever is responsible for
-                        * grabbing it into a UPL may be stuck behind the filesystem
-                        * lock this thread owns, or trying to take a lock exclusively
-                        * and waiting for the readers to drain from a rw lock...
-                        * if we block in those cases, we will deadlock
-                        */
-                       if (dst_page->list_req_pending) {
-
-                               if (dst_page->absent) {
-                                       /*
-                                        * this is the list_req_pending | absent | busy case
-                                        * which originates from vm_fault_page... we want
-                                        * to fall out of the fast path and go back
-                                        * to the caller which will gather this page
-                                        * into a UPL and issue the I/O if no one
-                                        * else beats us to it
-                                        */
-                                       break;
-                               }
-                               if (dst_page->pageout || dst_page->cleaning) {
-                                       /*
-                                        * this is the list_req_pending | pageout | busy case
-                                        * or the list_req_pending | cleaning case...
-                                        * which originate from the pageout_scan and
-                                        * msync worlds for the pageout case and the hibernate
-                                        * pre-cleaning world for the cleaning case...
-                                        * we need to reset the state of this page to indicate
-                                        * it should stay in the cache marked dirty... nothing else we
-                                        * can do at this point... we can't block on it, we can't busy
-                                        * it and we can't clean it from this routine.
-                                        */
-                                       vm_page_lockspin_queues();
-
-                                       vm_pageout_queue_steal(dst_page, TRUE); 
-                                       vm_page_deactivate(dst_page);
-
-                                       vm_page_unlock_queues();
-                               }
-                               /*
-                                * this is the list_req_pending | cleaning case...
-                                * we can go ahead and deal with this page since
-                                * its ok for us to mark this page busy... if a UPL
-                                * tries to gather this page, it will block until the
-                                * busy is cleared, thus allowing us safe use of the page
-                                * when we're done with it, we will clear busy and wake
-                                * up anyone waiting on it, thus allowing the UPL creation
-                                * to finish
-                                */
 
-                       } else if (dst_page->busy || dst_page->cleaning) {
+                       if (dst_page->busy || dst_page->cleaning) {
                                /*
                                 * someone else is playing with the page... if we've
                                 * already collected pages into this run, go ahead
@@ -476,16 +253,13 @@ memory_object_control_uiomove(
                                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                                continue;
                        }
-
-                       /*
-                        * this routine is only called when copying
-                        * to/from real files... no need to consider
-                        * encrypted swap pages
-                        */
-                       assert(!dst_page->encrypted);
+                       if (dst_page->laundry)
+                               vm_pageout_steal_laundry(dst_page, FALSE);
 
                        if (mark_dirty) {
-                               dst_page->dirty = TRUE;
+                               if (dst_page->dirty == FALSE)
+                                       dirty_count++;
+                               SET_PAGE_DIRTY(dst_page, FALSE);
                                if (dst_page->cs_validated && 
                                    !dst_page->cs_tainted) {
                                        /*
@@ -497,7 +271,7 @@ memory_object_control_uiomove(
 #if DEVELOPMENT || DEBUG
                                         vm_cs_validated_resets++;
 #endif
-                                       pmap_disconnect(dst_page->phys_page);
+                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
                                }
                        }
                        dst_page->busy = TRUE;
@@ -523,7 +297,7 @@ memory_object_control_uiomove(
                        if ((xsize = PAGE_SIZE - start_offset) > io_requested)
                                xsize = io_requested;
 
-                       if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
+                       if ( (retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio)) )
                                break;
 
                        io_requested -= xsize;
@@ -560,14 +334,16 @@ memory_object_control_uiomove(
                         * update clustered and speculative state
                         * 
                         */
-                       VM_PAGE_CONSUME_CLUSTERED(dst_page);
+                       if (dst_page->clustered)
+                               VM_PAGE_CONSUME_CLUSTERED(dst_page);
 
                        PAGE_WAKEUP_DONE(dst_page);
                }
                orig_offset = 0;
        }
+       if (object->pager)
+               task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
        vm_object_unlock(object);
-
        return (retval);
 }
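The new dirty_count bookkeeping feeds the per-task logical-write ledger: a page is counted only on its clean-to-dirty transition (SET_PAGE_DIRTY on an already-dirty page adds nothing), and the total is charged in bytes, attributed to the backing vnode, just before the object lock is dropped. A minimal sketch of that accounting step in isolation (charge_deferred_writes is a name local to this sketch):

    /* Sketch: charge a task for pages newly dirtied in one uiomove pass.
     * TASK_WRITE_DEFERRED marks writes the pageout path will issue later. */
    static void
    charge_deferred_writes(task_t task, int newly_dirtied, memory_object_t pager)
    {
            if (newly_dirtied == 0 || pager == MEMORY_OBJECT_NULL)
                    return;
            task_update_logical_writes(task,
                                       newly_dirtied * PAGE_SIZE,
                                       TASK_WRITE_DEFERRED,
                                       vnode_pager_lookup_vnode(pager));
    }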
 
@@ -578,7 +354,7 @@ memory_object_control_uiomove(
 void
 vnode_pager_bootstrap(void)
 {
-       register vm_size_t      size;
+       vm_size_t      size;
 
        size = (vm_size_t) sizeof(struct vnode_pager);
        vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
@@ -591,6 +367,9 @@ vnode_pager_bootstrap(void)
        apple_protect_pager_bootstrap();
 #endif /* CONFIG_CODE_DECRYPTION */
        swapfile_pager_bootstrap();
+#if __arm64__
+       fourk_pager_bootstrap();
+#endif /* __arm64__ */
        return;
 }
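vnode_pager_bootstrap() sizes a fixed-element zone for up to MAX_VNODE pagers; vnode_object_create() below draws from it with zalloc(). The zone-setup pattern, sketched for a hypothetical pager type (the zone_change() tuning calls mirror what this file applies to vnode_pager_zone outside the hunk shown):

    /* Sketch of the xnu zone-allocator bootstrap idiom (names hypothetical). */
    static zone_t my_pager_zone;

    void
    my_pager_bootstrap(void)
    {
            vm_size_t size = (vm_size_t) sizeof(struct my_pager);

            /* element size, max zone bytes, allocation chunk, debug name */
            my_pager_zone = zinit(size, 10000 * size, PAGE_SIZE,
                                  "my pager structures");
            zone_change(my_pager_zone, Z_CALLERACCT, FALSE);
            zone_change(my_pager_zone, Z_NOENCRYPT, TRUE);
    }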
 
@@ -635,7 +414,7 @@ vnode_pager_init(memory_object_t mem_obj,
 
        memory_object_control_reference(control);
 
-       vnode_object->control_handle = control;
+       vnode_object->vn_pgr_hdr.mo_control = control;
 
        attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
@@ -668,7 +447,7 @@ vnode_pager_data_return(
        __unused boolean_t              kernel_copy,
        int                     upl_flags)  
 {
-       register vnode_pager_t  vnode_object;
+       vnode_pager_t   vnode_object;
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
@@ -716,10 +495,9 @@ vnode_pager_get_isinuse(
 }
 
 kern_return_t
-vnode_pager_check_hard_throttle(
+vnode_pager_get_throttle_io_limit(
        memory_object_t         mem_obj,
-       uint32_t                *limit,
-       uint32_t                hard_throttle)
+       uint32_t                *limit)
 {
        vnode_pager_t   vnode_object;
 
@@ -728,7 +506,7 @@ vnode_pager_check_hard_throttle(
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       (void)vnode_pager_return_hard_throttle_limit(vnode_object->vnode_handle, limit, hard_throttle);
+       (void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
        return KERN_SUCCESS;
 }
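The rename tracks a contract change: instead of answering "is this vnode hard-throttled?", the pager now reports an I/O size limit for throttled readers. A hedged caller-side sketch, assuming a returned limit of 0 means unthrottled:

    /* Sketch: clip an I/O request to the pager's throttle limit. */
    static uint32_t
    clip_to_throttle_limit(memory_object_t pager, uint32_t io_size)
    {
            uint32_t limit = 0;

            if (vnode_pager_get_throttle_io_limit(pager, &limit) == KERN_SUCCESS &&
                limit != 0 && io_size > limit)
                    io_size = limit;        /* assumption: 0 means "no limit" */
            return io_size;
    }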
 
@@ -767,10 +545,13 @@ vnode_pager_get_object_size(
 }
 
 kern_return_t
-vnode_pager_get_object_pathname(
+vnode_pager_get_object_name(
        memory_object_t         mem_obj,
        char                    *pathname,
-       vm_size_t               *length_p)
+       vm_size_t               pathname_len,
+       char                    *filename,
+       vm_size_t               filename_len,
+       boolean_t               *truncated_path_p)
 {
        vnode_pager_t   vnode_object;
 
@@ -780,15 +561,19 @@ vnode_pager_get_object_pathname(
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       return vnode_pager_get_pathname(vnode_object->vnode_handle,
-                                       pathname,
-                                       length_p);
+       return vnode_pager_get_name(vnode_object->vnode_handle,
+                                   pathname,
+                                   pathname_len,
+                                   filename,
+                                   filename_len,
+                                   truncated_path_p);
 }
 
 kern_return_t
-vnode_pager_get_object_filename(
-       memory_object_t mem_obj,
-       const char      **filename)
+vnode_pager_get_object_mtime(
+       memory_object_t         mem_obj,
+       struct timespec         *mtime,
+       struct timespec         *cs_mtime)
 {
        vnode_pager_t   vnode_object;
 
@@ -798,26 +583,9 @@ vnode_pager_get_object_filename(
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       return vnode_pager_get_filename(vnode_object->vnode_handle,
-                                       filename);
-}
-
-kern_return_t
-vnode_pager_get_object_cs_blobs(
-       memory_object_t mem_obj,
-       void            **blobs)
-{
-       vnode_pager_t   vnode_object;
-
-       if (mem_obj == MEMORY_OBJECT_NULL ||
-           mem_obj->mo_pager_ops != &vnode_pager_ops) {
-               return KERN_INVALID_ARGUMENT;
-       }
-
-       vnode_object = vnode_pager_lookup(mem_obj);
-
-       return vnode_pager_get_cs_blobs(vnode_object->vnode_handle,
-                                       blobs);
+       return vnode_pager_get_mtime(vnode_object->vnode_handle,
+                                    mtime,
+                                    cs_mtime);
 }
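These two getters replace the old pathname/filename/cs-blob trio. vnode_pager_get_object_name() fills caller-supplied buffers for both the full path and the leaf name and flags truncation; vnode_pager_get_object_mtime() returns the vnode's modification time alongside cs_mtime, the mtime recorded when code-signing blobs were attached, which lets the CS layer notice that a signed file changed underneath it. A hedged usage sketch (buffer sizes arbitrary; MAXPATHLEN would be the natural choice):

    char            path[1024], leaf[256];
    boolean_t       truncated = FALSE;
    struct timespec mtime, cs_mtime;

    if (vnode_pager_get_object_name(mem_obj, path, sizeof(path),
                                    leaf, sizeof(leaf), &truncated) == KERN_SUCCESS &&
        vnode_pager_get_object_mtime(mem_obj, &mtime, &cs_mtime) == KERN_SUCCESS) {
            /* hedged interpretation: differing times mean the file was
             * modified after its code signature was registered */
            if (mtime.tv_sec != cs_mtime.tv_sec ||
                mtime.tv_nsec != cs_mtime.tv_nsec)
                    printf("%s%s: modified after signing\n",
                           path, truncated ? " (truncated)" : "");
    }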
 
 #if CHECK_CS_VALIDATION_BITMAP
@@ -857,10 +625,12 @@ vnode_pager_data_request(
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       size = MAX_UPL_TRANSFER * PAGE_SIZE;
+       size = MAX_UPL_TRANSFER_BYTES;
        base_offset = offset;
 
-       if (memory_object_cluster_size(vnode_object->control_handle, &base_offset, &size, &io_streaming, fault_info) != KERN_SUCCESS)
+       if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
+                                      &base_offset, &size, &io_streaming,
+                                      fault_info) != KERN_SUCCESS)
                size = PAGE_SIZE;
 
        assert(offset >= base_offset &&
@@ -876,7 +646,7 @@ void
 vnode_pager_reference(
        memory_object_t         mem_obj)
 {      
-       register vnode_pager_t  vnode_object;
+       vnode_pager_t   vnode_object;
        unsigned int            new_ref_count;
 
        vnode_object = vnode_pager_lookup(mem_obj);
@@ -891,7 +661,7 @@ void
 vnode_pager_deallocate(
        memory_object_t         mem_obj)
 {
-       register vnode_pager_t  vnode_object;
+       vnode_pager_t   vnode_object;
 
        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));
 
@@ -926,20 +696,13 @@ vnode_pager_terminate(
  */
 kern_return_t
 vnode_pager_synchronize(
-       memory_object_t         mem_obj,
-       memory_object_offset_t  offset,
-       memory_object_size_t            length,
+       __unused memory_object_t        mem_obj,
+       __unused memory_object_offset_t offset,
+       __unused memory_object_size_t   length,
        __unused vm_sync_t              sync_flags)
 {
-       register vnode_pager_t  vnode_object;
-
-       PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %p\n", mem_obj));
-
-       vnode_object = vnode_pager_lookup(mem_obj);
-
-       memory_object_synchronize_completed(vnode_object->control_handle, offset, length);
-
-       return (KERN_SUCCESS);
+       panic("vnode_pager_synchronize: memory_object_synchronize no longer supported\n");
+       return (KERN_FAILURE);
 }
 
 /*
@@ -973,7 +736,7 @@ kern_return_t
 vnode_pager_last_unmap(
        memory_object_t         mem_obj)
 {
-       register vnode_pager_t  vnode_object;
+       vnode_pager_t   vnode_object;
 
        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));
 
@@ -1008,7 +771,7 @@ vnode_pager_cluster_write(
                        upl_flags |= UPL_KEEPCACHED;
 
                while (cnt) {
-                       size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */
+                       size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */
 
                        assert((upl_size_t) size == size);
                        vnode_pageout(vnode_object->vnode_handle, 
@@ -1040,7 +803,7 @@ vnode_pager_cluster_write(
                         * and then clip the size to insure we
                         * don't request past the end of the underlying file
                         */
-                       size = PAGE_SIZE * MAX_UPL_TRANSFER;
+                       size = MAX_UPL_TRANSFER_BYTES;
                        base_offset = offset & ~((signed)(size - 1));
 
                        if ((base_offset + size) > vnode_size)
@@ -1058,7 +821,8 @@ vnode_pager_cluster_write(
                }
                assert((upl_size_t) size == size);
                vnode_pageout(vnode_object->vnode_handle,
-                             NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size, UPL_VNODE_PAGER, NULL);
+                             NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
+                             (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
        }
 }
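In the msync-driven branch above, the UPL has to start on a size-aligned boundary: offset is rounded down with offset & ~(size - 1) and the result is clipped against the file size. Concrete numbers, assuming MAX_UPL_TRANSFER_BYTES is 1 MB (256 pages of 4 KB):

    memory_object_offset_t offset = 0x123456;
    vm_size_t              size   = 0x100000; /* MAX_UPL_TRANSFER_BYTES (assumed) */

    memory_object_offset_t base_offset = offset & ~((signed)(size - 1));
    /* base_offset == 0x100000: rounded down to a 1 MB boundary        */
    upl_offset_t upl_off = (upl_offset_t)(offset - base_offset);
    /* upl_off     == 0x023456: where the target page sits in the UPL  */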
 
@@ -1108,9 +872,9 @@ vnode_pager_cluster_read(
                            UPL_SET_INTERNAL);
                count = 0;
                assert((upl_size_t) cnt == cnt);
-               kr = memory_object_upl_request(vnode_object->control_handle,
+               kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
                                               base_offset, (upl_size_t) cnt,
-                                              &upl, NULL, &count, uplflags);
+                                              &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
                if (kr == KERN_SUCCESS) {
                        upl_abort(upl, 0);
                        upl_deallocate(upl);
@@ -1131,18 +895,6 @@ vnode_pager_cluster_read(
 
 }
 
-
-/*
- *
- */
-void
-vnode_pager_release_from_cache(
-               int     *cnt)
-{
-       memory_object_free_from_cache(
-                       &realhost, &vnode_pager_ops, cnt);
-}
-
 /*
  *
  */
@@ -1150,7 +902,7 @@ vnode_pager_t
 vnode_object_create(
         struct vnode *vp)
 {
-       register vnode_pager_t  vnode_object;
+       vnode_pager_t  vnode_object;
 
        vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
        if (vnode_object == VNODE_PAGER_NULL)
@@ -1163,10 +915,11 @@ vnode_object_create(
         * we reserve the first word in the object for a fake ip_kotype
         * setting - that will tell vm_map to use it as a memory object.
         */
-       vnode_object->pager_ops = &vnode_pager_ops;
-       vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
+       vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
+       vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
+       vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
+
        vnode_object->ref_count = 1;
-       vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
        vnode_object->vnode_handle = vp;
 
        return(vnode_object);
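This initialization is the payoff of the structural change at the top of the diff: instead of a hand-rolled fake ipc_object_header plus a loose pager_ops pointer, every pager now embeds a common struct memory_object as its first member, so generic VM code can drive any pager through the header while pager code casts back to its own type. The idiom, sketched for a hypothetical pager:

    /* Sketch: the embedded-header idiom (struct my_pager is hypothetical). */
    typedef struct my_pager {
            struct memory_object    mp_hdr;         /* MUST be first        */
            unsigned int            ref_count;      /* pager-specific state */
    } *my_pager_t;

    static my_pager_t
    my_pager_create(void)
    {
            my_pager_t p = (my_pager_t) kalloc(sizeof (*p));

            p->mp_hdr.mo_ikot = IKOT_MEMORY_OBJECT;    /* fake ip_kotype          */
            p->mp_hdr.mo_pager_ops = &vnode_pager_ops; /* would be &my_pager_ops  */
            p->mp_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
            p->ref_count = 1;
            /* usable as a memory_object_t because the header is first */
            return p;
    }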
@@ -1182,11 +935,23 @@ vnode_pager_lookup(
        vnode_pager_t   vnode_object;
 
        vnode_object = (vnode_pager_t)name;
-       assert(vnode_object->pager_ops == &vnode_pager_ops);
+       assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
        return (vnode_object);
 }
 
 
+struct vnode *
+vnode_pager_lookup_vnode(
+       memory_object_t  name)
+{
+       vnode_pager_t   vnode_object;
+       vnode_object = (vnode_pager_t)name;
+       if(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops)
+               return (vnode_object->vnode_handle);
+       else
+               return NULL;
+}
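Unlike vnode_pager_lookup(), which asserts, this new lookup is a checked downcast: it verifies the ops vector before trusting the cast and returns NULL for any non-vnode pager (apple-protect, swapfile, fourk, ...). That makes it safe on an arbitrary object->pager, which is how the logical-write accounting in memory_object_control_uiomove() uses it:

    /* Sketch: attribute I/O to whatever vnode (if any) backs an object. */
    struct vnode *vp = vnode_pager_lookup_vnode(object->pager);

    if (vp != NULL) {
            /* vnode-backed: charge the bytes against this file */
            task_update_logical_writes(current_task(), bytes,
                                       TASK_WRITE_DEFERRED, vp);
    }
    /* the call site above passes the lookup result straight through;
     * this sketch adds an explicit NULL check for clarity */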
+
 /*********************** proc_info implementation *************/
 
 #include <sys/bsdtask_info.h>
@@ -1205,6 +970,7 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *
        vm_map_offset_t         start;
        vm_region_extended_info_data_t extended;
        vm_region_top_info_data_t top;
+       boolean_t do_region_footprint;
 
            task_lock(task);
            map = task->map;
@@ -1215,15 +981,75 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *
            }
            vm_map_reference(map); 
            task_unlock(task);
-           
+
+           do_region_footprint = task_self_region_footprint();
+
            vm_map_lock_read(map);
 
            start = address;
+
            if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+                       if (do_region_footprint &&
+                           address == tmp_entry->vme_end) {
+                               ledger_amount_t nonvol, nonvol_compressed;
+
+                               /*
+                                * This request is right after the last valid
+                                * memory region;  instead of reporting the
+                                * end of the address space, report a fake
+                                * memory region to account for non-volatile
+                                * purgeable memory owned by this task.
+                                */
+
+                               ledger_get_balance(
+                                       task->ledger,
+                                       task_ledgers.purgeable_nonvolatile,
+                                       &nonvol);
+                               ledger_get_balance(
+                                       task->ledger,
+                                       task_ledgers.purgeable_nonvolatile_compressed,
+                                       &nonvol_compressed);
+                               if (nonvol + nonvol_compressed == 0) {
+                                       /* nothing to report */
+                                       vm_map_unlock_read(map);
+                                       vm_map_deallocate(map);
+                                       return 0;
+                               }
+                               /* provide fake region for purgeable */
+                               pinfo->pri_offset = address;
+                               pinfo->pri_protection = VM_PROT_DEFAULT;
+                               pinfo->pri_max_protection = VM_PROT_DEFAULT;
+                               pinfo->pri_inheritance = VM_INHERIT_NONE;
+                               pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
+                               pinfo->pri_user_wired_count = 0;
+                               pinfo->pri_user_tag = -1;
+                               pinfo->pri_pages_resident =
+                                       (uint32_t) (nonvol / PAGE_SIZE);
+                               pinfo->pri_pages_shared_now_private = 0;
+                               pinfo->pri_pages_swapped_out =
+                                       (uint32_t) (nonvol_compressed / PAGE_SIZE);
+                               pinfo->pri_pages_dirtied =
+                                       (uint32_t) (nonvol / PAGE_SIZE);
+                               pinfo->pri_ref_count = 1;
+                               pinfo->pri_shadow_depth = 0;
+                               pinfo->pri_share_mode = SM_PRIVATE;
+                               pinfo->pri_private_pages_resident =
+                                       (uint32_t) (nonvol / PAGE_SIZE);
+                               pinfo->pri_shared_pages_resident = 0;
+                               pinfo->pri_obj_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile);
+                               pinfo->pri_address = address;
+                               pinfo->pri_size =
+                                       (uint64_t) (nonvol + nonvol_compressed);
+                               pinfo->pri_depth = 0;
+
+                               vm_map_unlock_read(map);
+                               vm_map_deallocate(map);
+                               return 1;
+                       }
                        vm_map_unlock_read(map);
-                       vm_map_deallocate(map); 
-                       return(0);
+                       vm_map_deallocate(map);
+                       return 0;
                }
            } else {
                entry = tmp_entry;
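For a footprint-reporting task probing past its last mapping, the code above now synthesizes a region whose size is the task's non-volatile purgeable footprint, pulled from two ledger entries. The arithmetic, with hedged example numbers (16 KB pages assumed):

    /* Sketch: 3 MB resident non-volatile, 1 MB compressed, PAGE_SIZE = 16 KB */
    ledger_amount_t nonvol            = 3 << 20;
    ledger_amount_t nonvol_compressed = 1 << 20;

    uint32_t resident = (uint32_t)(nonvol / PAGE_SIZE);            /* 192 pages */
    uint32_t swapped  = (uint32_t)(nonvol_compressed / PAGE_SIZE); /*  64 pages */
    uint64_t size     = (uint64_t)(nonvol + nonvol_compressed);    /* 4 MB      */
    /* pages_dirtied is reported equal to resident: purgeable memory is
     * anonymous, so every resident page is private and dirty */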
@@ -1231,13 +1057,13 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *
 
            start = entry->vme_start;
 
-           pinfo->pri_offset = entry->offset;
+           pinfo->pri_offset = VME_OFFSET(entry);
            pinfo->pri_protection = entry->protection;
            pinfo->pri_max_protection = entry->max_protection;
            pinfo->pri_inheritance = entry->inheritance;
            pinfo->pri_behavior = entry->behavior;
            pinfo->pri_user_wired_count = entry->user_wired_count;
-           pinfo->pri_user_tag = entry->alias;
+           pinfo->pri_user_tag = VME_ALIAS(entry);
 
            if (entry->is_sub_map) {
                pinfo->pri_flags |= PROC_REGION_SUBMAP;
@@ -1248,7 +1074,7 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *
 
 
            extended.protection = entry->protection;
-           extended.user_tag = entry->alias;
+           extended.user_tag = VME_ALIAS(entry);
            extended.pages_resident = 0;
            extended.pages_swapped_out = 0;
            extended.pages_shared_now_private = 0;
@@ -1256,7 +1082,7 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *
            extended.external_pager = 0;
            extended.shadow_depth = 0;
 
-           vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, &extended);
+           vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);
 
            if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
                    extended.share_mode = SM_PRIVATE;
@@ -1297,6 +1123,86 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *
            return(1);
 }
 
+int
+fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t  *vid)
+{
+
+       vm_map_t map;
+       vm_map_offset_t address = (vm_map_offset_t )arg;
+       vm_map_entry_t          tmp_entry;
+       vm_map_entry_t          entry;
+
+       task_lock(task);
+       map = task->map;
+       if (map == VM_MAP_NULL) 
+       {
+               task_unlock(task);
+               return(0);
+       }
+       vm_map_reference(map); 
+       task_unlock(task);
+       
+       vm_map_lock_read(map);
+
+       if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
+               if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+                       vm_map_unlock_read(map);
+                       vm_map_deallocate(map); 
+                       return(0);
+               }
+       } else {
+               entry = tmp_entry;
+       }
+
+       while (entry != vm_map_to_entry(map)) {
+               *vnodeaddr = 0;
+               *vid = 0;
+
+               if (entry->is_sub_map == 0) {
+                       if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
+
+                               pinfo->pri_offset = VME_OFFSET(entry);
+                               pinfo->pri_protection = entry->protection;
+                               pinfo->pri_max_protection = entry->max_protection;
+                               pinfo->pri_inheritance = entry->inheritance;
+                               pinfo->pri_behavior = entry->behavior;
+                               pinfo->pri_user_wired_count = entry->user_wired_count;
+                               pinfo->pri_user_tag = VME_ALIAS(entry);
+                               
+                               if (entry->is_shared)
+                                       pinfo->pri_flags |= PROC_REGION_SHARED;
+                               
+                               pinfo->pri_pages_resident = 0;
+                               pinfo->pri_pages_shared_now_private = 0;
+                               pinfo->pri_pages_swapped_out = 0;
+                               pinfo->pri_pages_dirtied = 0;
+                               pinfo->pri_ref_count = 0;
+                               pinfo->pri_shadow_depth = 0;
+                               pinfo->pri_share_mode = 0;
+                               
+                               pinfo->pri_private_pages_resident = 0;
+                               pinfo->pri_shared_pages_resident = 0;
+                               pinfo->pri_obj_id = 0;
+                               
+                               pinfo->pri_address = (uint64_t)entry->vme_start;
+                               pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
+                               pinfo->pri_depth = 0;
+       
+                               vm_map_unlock_read(map);
+                               vm_map_deallocate(map); 
+                               return(1);
+                       }
+               }
+
+               /* Keep searching for a vnode-backed mapping */
+               entry = entry->vme_next;
+       }
+
+       vm_map_unlock_read(map);
+       vm_map_deallocate(map); 
+       return(0);
+}
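This variant serves proc_info callers that only want file mappings: instead of describing whatever region contains the address, it walks forward from the address to the next vnode-backed, non-submap entry. A caller can therefore enumerate every mapped file by feeding each region's end back in as the next probe address; a hedged kernel-side sketch:

    struct proc_regioninfo_internal pri;
    uintptr_t vp;
    uint32_t  vid;
    uint64_t  addr = 0;

    for (;;) {
            bzero(&pri, sizeof(pri));       /* pri_flags is only ever OR-ed in */
            if (!fill_procregioninfo_onlymappedvnodes(task, addr, &pri,
                                                      &vp, &vid))
                    break;
            /* vp/vid name the file backing [pri_address, pri_address+pri_size) */
            addr = pri.pri_address + pri.pri_size;  /* resume after this region */
    }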
+
 static int
 fill_vnodeinfoforaddr(
        vm_map_entry_t                  entry,
@@ -1317,7 +1223,7 @@ fill_vnodeinfoforaddr(
                 * The last object in the shadow chain has the
                 * relevant pager information.
                 */
-               top_object = entry->object.vm_object;
+               top_object = VME_OBJECT(entry);
                if (top_object == VM_OBJECT_NULL) {
                        object = VM_OBJECT_NULL;
                        shadow_depth = 0;
@@ -1381,6 +1287,23 @@ vnode_pager_get_object_vnode (
        return(KERN_FAILURE);
 }
 
+#if CONFIG_IOSCHED
+kern_return_t
+vnode_pager_get_object_devvp(
+       memory_object_t         mem_obj,
+       uintptr_t               *devvp)
+{
+       struct vnode    *vp;
+       uint32_t        vid;
+
+       if(vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS)
+               return (KERN_FAILURE);
+       *devvp = (uintptr_t)vnode_mountdevvp(vp);
+       if (*devvp)
+               return (KERN_SUCCESS);  
+       return (KERN_FAILURE);
+}
+#endif
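vnode_pager_get_object_devvp() exists for the I/O scheduler: it resolves the pager's backing vnode and then, via vnode_mountdevvp(), the device vnode of the mount that file lives on, which is the granularity the I/O throttling machinery works at. A hedged sketch:

    #if CONFIG_IOSCHED
    /* Sketch: find the device a paging request should be accounted against. */
    uintptr_t devvp = 0;

    if (vnode_pager_get_object_devvp(object->pager, &devvp) == KERN_SUCCESS) {
            /* devvp is the mount's device vnode; e.g. key a per-device
             * throttle-info lookup off it (exact consumer hedged) */
    }
    #endif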
 
 /*
  * Find the underlying vnode object for the given vm_map_entry.  If found, return with the
@@ -1403,7 +1326,7 @@ find_vnode_object(
                 * relevant pager information.
                 */
 
-               top_object = entry->object.vm_object;
+               top_object = VME_OBJECT(entry);
 
                if (top_object) {
                        vm_object_lock(top_object);