diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c
index 7f48b127e1cf6cce597b596fc6e9c1deecf70c05..a16857ec00750e91161bdcf31d56b25669ad1ed4 100644
--- a/osfmk/vm/vm_object.c
+++ b/osfmk/vm/vm_object.c
@@ -72,6 +72,8 @@
 #include <mach/memory_object_control_server.h>
 #include <mach/vm_param.h>
 
+#include <mach/sdt.h>
+
 #include <ipc/ipc_types.h>
 #include <ipc/ipc_port.h>
 
@@ -80,6 +82,7 @@
 #include <kern/lock.h>
 #include <kern/queue.h>
 #include <kern/xpr.h>
+#include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/host.h>
 #include <kern/host_statistics.h>
@@ -87,6 +90,7 @@
 #include <kern/misc_protos.h>
 
 #include <vm/memory_object.h>
+#include <vm/vm_compressor_pager.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
@@ -95,9 +99,7 @@
 #include <vm/vm_protos.h>
 #include <vm/vm_purgeable_internal.h>
 
-#if CONFIG_EMBEDDED
-#include <sys/kern_memorystatus.h>
-#endif
+#include <vm/vm_compressor.h>
 
 /*
  *     Virtual memory objects maintain the actual data
@@ -206,6 +208,8 @@ static zone_t               vm_object_zone;         /* vm backing store zone */
 static struct vm_object                        kernel_object_store;
 vm_object_t                                            kernel_object;
 
+static struct vm_object                        compressor_object_store;
+vm_object_t                            compressor_object = &compressor_object_store;
 
 /*
  *     The submap object is used as a placeholder for vm_map_submap
@@ -261,28 +265,40 @@ unsigned int vm_page_purged_others = 0;
 static vm_object_t     vm_object_cache_trim(
                                boolean_t called_from_vm_object_deallocate);
 
-static queue_head_t    vm_object_cached_list;
-static int             vm_object_cached_count=0;
+static void            vm_object_deactivate_all_pages(
+                               vm_object_t     object);
+
 static int             vm_object_cached_high;  /* highest # cached objects */
 static int             vm_object_cached_max = 512;     /* may be patched*/
 
-static lck_mtx_t       vm_object_cached_lock_data;
-static lck_mtx_ext_t   vm_object_cached_lock_data_ext;
-
 #define vm_object_cache_lock()         \
                lck_mtx_lock(&vm_object_cached_lock_data)
 #define vm_object_cache_lock_try()             \
                lck_mtx_try_lock(&vm_object_cached_lock_data)
+
+#endif /* VM_OBJECT_CACHE */
+
+static queue_head_t    vm_object_cached_list;
+static uint32_t                vm_object_cache_pages_freed = 0;
+static uint32_t                vm_object_cache_pages_moved = 0;
+static uint32_t                vm_object_cache_pages_skipped = 0;
+static uint32_t                vm_object_cache_adds = 0;
+static uint32_t                vm_object_cached_count = 0;
+static lck_mtx_t       vm_object_cached_lock_data;
+static lck_mtx_ext_t   vm_object_cached_lock_data_ext;
+
+static uint32_t                vm_object_page_grab_failed = 0;
+static uint32_t                vm_object_page_grab_skipped = 0;
+static uint32_t                vm_object_page_grab_returned = 0;
+static uint32_t                vm_object_page_grab_pmapped = 0;
+static uint32_t                vm_object_page_grab_reactivations = 0;
+
 #define vm_object_cache_lock_spin()            \
                lck_mtx_lock_spin(&vm_object_cached_lock_data)
 #define vm_object_cache_unlock()       \
                lck_mtx_unlock(&vm_object_cached_lock_data)
 
-#endif /* VM_OBJECT_CACHE */
-
-
-static void            vm_object_deactivate_all_pages(
-                               vm_object_t     object);
+static void    vm_object_cache_remove_locked(vm_object_t);
 
 
 #define        VM_OBJECT_HASH_COUNT            1024
@@ -333,6 +349,10 @@ unsigned int vm_object_reap_count_async = 0;
 #define vm_object_reaper_unlock()      \
                lck_mtx_unlock(&vm_object_reaper_lock_data)
 
+#if 0
+#undef KERNEL_DEBUG
+#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
+#endif
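
The disabled block above is a per-file debugging switch: flipping the #if 0 to #if 1 remaps every KERNEL_DEBUG call in this file onto KERNEL_DEBUG_CONSTANT, whose trace points are retained in more build configurations than plain KERNEL_DEBUG, so tracing can be force-enabled for this file without touching each call site. A minimal illustration of the same toggle pattern, with hypothetical printf-based macros standing in for the kdebug ones:

    #include <stdio.h>

    /* Hypothetical stand-ins: TRACE is normally compiled out, TRACE_ALWAYS is kept. */
    #define TRACE(...)        do { } while (0)
    #define TRACE_ALWAYS(...) printf(__VA_ARGS__)

    #if 0                     /* flip to 1 to force tracing for this file only */
    #undef  TRACE
    #define TRACE TRACE_ALWAYS
    #endif

    int main(void)
    {
        TRACE("object cache scan start\n");   /* emitted only while the toggle is on */
        return 0;
    }
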
 
 
 static lck_mtx_t *
@@ -446,7 +466,7 @@ _vm_object_allocate(
        queue_init(&object->uplq);
 #endif /* UPL_DEBUG */
        vm_object_lock_init(object);
-       object->size = size;
+       object->vo_size = size;
 }
 
 __private_extern__ vm_object_t
@@ -467,9 +487,11 @@ vm_object_allocate(
 
 
 lck_grp_t              vm_object_lck_grp;
-lck_grp_attr_t vm_object_lck_grp_attr;
+lck_grp_t              vm_object_cache_lck_grp;
+lck_grp_attr_t         vm_object_lck_grp_attr;
 lck_attr_t             vm_object_lck_attr;
 lck_attr_t             kernel_object_lck_attr;
+lck_attr_t             compressor_object_lck_attr;
 
 /*
  *     vm_object_bootstrap:
@@ -485,17 +507,18 @@ vm_object_bootstrap(void)
                                round_page(512*1024),
                                round_page(12*1024),
                                "vm objects");
+       zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
+       zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);
 
        vm_object_init_lck_grp();
 
-#if VM_OBJECT_CACHE
        queue_init(&vm_object_cached_list);
 
        lck_mtx_init_ext(&vm_object_cached_lock_data,
                &vm_object_cached_lock_data_ext,
-               &vm_object_lck_grp,
+               &vm_object_cache_lck_grp,
                &vm_object_lck_attr);
-#endif
+
        queue_init(&vm_object_reaper_queue);
 
        for (i = 0; i < VM_OBJECT_HASH_LOCK_COUNT; i++) {
@@ -514,6 +537,8 @@ vm_object_bootstrap(void)
                              round_page(512*1024),
                              round_page(12*1024),
                              "vm object hash entries");
+       zone_change(vm_object_hash_zone, Z_CALLERACCT, FALSE);
+       zone_change(vm_object_hash_zone, Z_NOENCRYPT, TRUE);
 
        for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
                queue_init(&vm_object_hashtable[i]);
@@ -536,7 +561,7 @@ vm_object_bootstrap(void)
         */
        vm_object_lock_init(&vm_object_template);
 #endif
-       vm_object_template.size = 0;
+       vm_object_template.vo_size = 0;
        vm_object_template.memq_hint = VM_PAGE_NULL;
        vm_object_template.ref_count = 1;
 #if    TASK_SWAPPER
@@ -547,7 +572,7 @@ vm_object_bootstrap(void)
        vm_object_template.reusable_page_count = 0;
        vm_object_template.copy = VM_OBJECT_NULL;
        vm_object_template.shadow = VM_OBJECT_NULL;
-       vm_object_template.shadow_offset = (vm_object_offset_t) 0;
+       vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0;
        vm_object_template.pager = MEMORY_OBJECT_NULL;
        vm_object_template.paging_offset = 0;
        vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
@@ -568,8 +593,8 @@ vm_object_bootstrap(void)
        vm_object_template.pageout = FALSE;
        vm_object_template.alive = TRUE;
        vm_object_template.purgable = VM_PURGABLE_DENY;
+       vm_object_template.purgeable_when_ripe = FALSE;
        vm_object_template.shadowed = FALSE;
-       vm_object_template.silent_overwrite = FALSE;
        vm_object_template.advisory_pageout = FALSE;
        vm_object_template.true_share = FALSE;
        vm_object_template.terminating = FALSE;
@@ -588,6 +613,7 @@ vm_object_bootstrap(void)
        vm_object_template.sequential = (vm_object_offset_t) 0;
        vm_object_template.pages_created = 0;
        vm_object_template.pages_used = 0;
+       vm_object_template.scan_collisions = 0;
 
 #if    MACH_PAGEMAP
        vm_object_template.existence_map = VM_EXTERNAL_NULL;
@@ -598,7 +624,9 @@ vm_object_bootstrap(void)
 #endif /* MACH_ASSERT */
 
        /* cache bitfields */
-       vm_object_template.wimg_bits = VM_WIMG_DEFAULT;
+       vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
+       vm_object_template.set_cache_attr = FALSE;
+       vm_object_template.object_slid = FALSE;
        vm_object_template.code_signed = FALSE;
        vm_object_template.hashed = FALSE;
        vm_object_template.transposed = FALSE;
@@ -620,6 +648,10 @@ vm_object_bootstrap(void)
        vm_object_template.objq.next=NULL;
        vm_object_template.objq.prev=NULL;
 
+       vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
+       vm_object_template.purgeable_queue_group = 0;
+
+       vm_object_template.vo_cache_ts = 0;
        
        /*
         *      Initialize the "kernel object"
@@ -638,8 +670,12 @@ vm_object_bootstrap(void)
 #else
        _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
                            kernel_object);
+
+       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
+                           compressor_object);
 #endif
        kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+       compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
 
        /*
         *      Initialize the "submap object".  Make it as large as the
@@ -702,9 +738,12 @@ vm_object_init_lck_grp(void)
         */
        lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
        lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
+       lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr);
        lck_attr_setdefault(&vm_object_lck_attr);
        lck_attr_setdefault(&kernel_object_lck_attr);
        lck_attr_cleardebug(&kernel_object_lck_attr);
+       lck_attr_setdefault(&compressor_object_lck_attr);
+       lck_attr_cleardebug(&compressor_object_lck_attr);
 }
 
 #if VM_OBJECT_CACHE
@@ -744,13 +783,16 @@ vm_object_deallocate(
        if (object == VM_OBJECT_NULL)
                return;
 
-       if (object == kernel_object) {
+       if (object == kernel_object || object == compressor_object) {
                vm_object_lock_shared(object);
 
                OSAddAtomic(-1, &object->ref_count);
 
                if (object->ref_count == 0) {
-                       panic("vm_object_deallocate: losing kernel_object\n");
+                       if (object == kernel_object)
+                               panic("vm_object_deallocate: losing kernel_object\n");
+                       else
+                               panic("vm_object_deallocate: losing compressor_object\n");
                }
                vm_object_unlock(object);
                return;
@@ -836,17 +878,6 @@ vm_object_deallocate(
                                vm_object_lock(object);
                                vm_object_mapping_end(object);
                        }
-                       /*
-                        * recheck the ref_count since we dropped the object lock
-                        * to call 'memory_object_last_unmap'... it's possible
-                        * additional references got taken and we only want
-                        * to deactivate the pages if this 'named' object will only
-                        * referenced by the backing pager once we drop our reference
-                        * below
-                        */
-                       if (!object->terminating && object->ref_count == 2)
-                               vm_object_deactivate_all_pages(object);
-
                        assert(object->ref_count > 0);
                }
 
@@ -1071,6 +1102,368 @@ vm_object_deallocate(
 }
 
 
+
+vm_page_t
+vm_object_page_grab(
+       vm_object_t     object)
+{
+       vm_page_t       p, next_p;
+       int             p_limit = 0;
+       int             p_skipped = 0;
+
+       vm_object_lock_assert_exclusive(object);
+
+       next_p = (vm_page_t)queue_first(&object->memq);
+       p_limit = MIN(50, object->resident_page_count);
+
+       while (!queue_end(&object->memq, (queue_entry_t)next_p) && --p_limit > 0) {
+
+               p = next_p;
+               next_p = (vm_page_t)queue_next(&next_p->listq);
+
+               if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry || p->fictitious)
+                       goto move_page_in_obj;
+
+               if (p->pmapped || p->dirty || p->precious) {
+                       vm_page_lockspin_queues();
+
+                       if (p->pmapped) {
+                               int refmod_state;
+
+                               vm_object_page_grab_pmapped++;
+
+                               if (p->reference == FALSE || p->dirty == FALSE) {
+
+                                       refmod_state = pmap_get_refmod(p->phys_page);
+
+                                       if (refmod_state & VM_MEM_REFERENCED)
+                                               p->reference = TRUE;
+                                       if (refmod_state & VM_MEM_MODIFIED) {
+                                               SET_PAGE_DIRTY(p, FALSE);
+                                       }
+                               }
+                               if (p->dirty == FALSE && p->precious == FALSE) {
+
+                                       refmod_state = pmap_disconnect(p->phys_page);
+
+                                       if (refmod_state & VM_MEM_REFERENCED)
+                                               p->reference = TRUE;
+                                       if (refmod_state & VM_MEM_MODIFIED) {
+                                               SET_PAGE_DIRTY(p, FALSE);
+                                       }
+
+                                       if (p->dirty == FALSE)
+                                               goto take_page;
+                               }
+                       }
+                       if (p->inactive && p->reference == TRUE) {
+                               vm_page_activate(p);
+
+                               VM_STAT_INCR(reactivations);
+                               vm_object_page_grab_reactivations++;
+                       }
+                       vm_page_unlock_queues();
+move_page_in_obj:
+                       queue_remove(&object->memq, p, vm_page_t, listq);
+                       queue_enter(&object->memq, p, vm_page_t, listq);
+
+                       p_skipped++;
+                       continue;
+               }
+               vm_page_lockspin_queues();
+take_page:
+               vm_page_free_prepare_queues(p);
+               vm_object_page_grab_returned++;
+               vm_object_page_grab_skipped += p_skipped;
+
+               vm_page_unlock_queues();
+
+               vm_page_free_prepare_object(p, TRUE);
+               
+               return (p);
+       }
+       vm_object_page_grab_skipped += p_skipped;
+       vm_object_page_grab_failed++;
+
+       return (NULL);
+}
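
vm_object_page_grab() walks at most MIN(50, resident_page_count) pages, rotates pages it cannot take to the tail of the object's memq, and frees the first page that is unwired, not busy, and provably clean once the hardware referenced/modified bits have been folded into the software state. A compilable sketch of that per-page decision, with hypothetical hw_get_refmod()/hw_disconnect() helpers standing in for pmap_get_refmod()/pmap_disconnect():

    #include <stdbool.h>

    #define REF 0x1
    #define MOD 0x2

    struct page { bool wired, busy, dirty, precious, pmapped, referenced; };

    /* Hypothetical stand-ins for the pmap layer. */
    static int hw_get_refmod(struct page *p)  { (void)p; return 0; }
    static int hw_disconnect(struct page *p)  { (void)p; return 0; }

    /* Decide whether a resident page can be stolen right now: fold the hardware
     * referenced/modified bits into the software state, tear down mappings while
     * the page still looks clean, and only report it grabbable if it stayed clean. */
    bool page_is_grabbable(struct page *p)
    {
        if (p->wired || p->busy)
            return false;                      /* pinned or in use: rotate past it */

        if (p->pmapped) {
            int rm = hw_get_refmod(p);
            if (rm & REF) p->referenced = true;
            if (rm & MOD) p->dirty = true;

            if (!p->dirty && !p->precious) {
                rm = hw_disconnect(p);         /* unmap before stealing */
                if (rm & REF) p->referenced = true;
                if (rm & MOD) p->dirty = true;
            }
        }
        return !p->dirty && !p->precious;
    }
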
+
+
+
+#define EVICT_PREPARE_LIMIT    64
+#define EVICT_AGE              10
+
+static clock_sec_t     vm_object_cache_aging_ts = 0;
+
+static void
+vm_object_cache_remove_locked(
+       vm_object_t     object)
+{
+       queue_remove(&vm_object_cached_list, object, vm_object_t, objq);
+       object->objq.next = NULL;
+       object->objq.prev = NULL;
+
+       vm_object_cached_count--;
+}
+
+void
+vm_object_cache_remove(
+       vm_object_t     object)
+{
+       vm_object_cache_lock_spin();
+
+       if (object->objq.next || object->objq.prev)
+               vm_object_cache_remove_locked(object);
+
+       vm_object_cache_unlock();
+}
+
+void
+vm_object_cache_add(
+       vm_object_t     object)
+{
+       clock_sec_t sec;
+       clock_nsec_t nsec;
+
+       if (object->resident_page_count == 0)
+               return;
+       clock_get_system_nanotime(&sec, &nsec);
+
+       vm_object_cache_lock_spin();
+
+       if (object->objq.next == NULL && object->objq.prev == NULL) {
+               queue_enter(&vm_object_cached_list, object, vm_object_t, objq);
+               object->vo_cache_ts = sec + EVICT_AGE;
+               object->vo_cache_pages_to_scan = object->resident_page_count;
+
+               vm_object_cached_count++;
+               vm_object_cache_adds++;
+       }
+       vm_object_cache_unlock();
+}
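
vm_object_cache_add() stamps the object with vo_cache_ts = now + EVICT_AGE, and vm_object_cache_evict() refuses to touch the head of the FIFO-ordered cached list until that time has passed, remembering the next wake-up in vm_object_cache_aging_ts. A small sketch of that aging check, using time(2) as a stand-in for clock_get_system_nanotime():

    #include <stdbool.h>
    #include <time.h>

    #define EVICT_AGE 10   /* seconds an object must sit in the cache before eviction */

    struct cached_obj { time_t cache_ts; };

    /* On insertion, record when the object becomes eligible for eviction. */
    void cache_stamp(struct cached_obj *o)
    {
        o->cache_ts = time(NULL) + EVICT_AGE;
    }

    /* The evictor only checks the head: the list is FIFO, so if the oldest entry
     * has not aged yet, nothing behind it has either and the scan can bail out. */
    bool cache_head_ripe(const struct cached_obj *head)
    {
        return time(NULL) >= head->cache_ts;
    }
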
+
+int
+vm_object_cache_evict(
+       int     num_to_evict,
+       int     max_objects_to_examine)
+{
+       vm_object_t     object = VM_OBJECT_NULL;
+       vm_object_t     next_obj = VM_OBJECT_NULL;
+       vm_page_t       local_free_q = VM_PAGE_NULL;
+       vm_page_t       p;
+       vm_page_t       next_p;
+       int             object_cnt = 0;
+       vm_page_t       ep_array[EVICT_PREPARE_LIMIT];
+       int             ep_count;
+       int             ep_limit;
+       int             ep_index;
+       int             ep_freed = 0;
+       int             ep_moved = 0;
+       uint32_t        ep_skipped = 0;
+       clock_sec_t     sec;
+       clock_nsec_t    nsec;
+
+       KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
+       /*
+        * do a couple of quick checks to see if it's 
+        * worthwhile grabbing the lock
+        */
+       if (queue_empty(&vm_object_cached_list)) {
+               KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
+               return (0);
+       }
+       clock_get_system_nanotime(&sec, &nsec);
+
+       /*
+        * the object on the head of the queue has not
+        * yet sufficiently aged
+        */
+       if (sec < vm_object_cache_aging_ts) {
+               KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
+               return (0);
+       }
+       /*
+        * don't need the queue lock to find 
+        * and lock an object on the cached list
+        */
+       vm_page_unlock_queues();
+
+       vm_object_cache_lock_spin();
+
+       for (;;) {
+               next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
+
+               while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
+
+                       object = next_obj;
+                       next_obj = (vm_object_t)queue_next(&next_obj->objq);
+                       
+                       if (sec < object->vo_cache_ts) {
+                               KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
+
+                               vm_object_cache_aging_ts = object->vo_cache_ts;
+                               object = VM_OBJECT_NULL;
+                               break;
+                       }
+                       if (!vm_object_lock_try_scan(object)) {
+                               /*
+                                * just skip over this guy for now... if we find
+                                * an object to steal pages from, we'll revisit in a bit...
+                                * hopefully, the lock will have cleared
+                                */
+                               KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
+
+                               object = VM_OBJECT_NULL;
+                               continue;
+                       }
+                       if (queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
+                               /*
+                                * this case really shouldn't happen, but it's not fatal
+                                * so deal with it... if we don't remove the object from
+                                * the list, we'll never move past it.
+                                */
+                               KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
+                               
+                               vm_object_cache_remove_locked(object);
+                               vm_object_unlock(object);
+                               object = VM_OBJECT_NULL;
+                               continue;
+                       }
+                       /*
+                        * we have a locked object with pages...
+                        * time to start harvesting
+                        */
+                       break;
+               }
+               vm_object_cache_unlock();
+
+               if (object == VM_OBJECT_NULL)
+                       break;
+
+               /*
+                * object is locked at this point and
+                * has resident pages
+                */
+               next_p = (vm_page_t)queue_first(&object->memq);
+
+               /*
+                * break the page scan into 2 pieces to minimize the time spent
+                * behind the page queue lock...
+                * the list of pages on these unused objects is likely to be cold
+                * w/r to the cpu cache which increases the time to scan the list
+                * tenfold...  and we may have a 'run' of pages we can't utilize that
+                * needs to be skipped over...
+                */
+               if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT)
+                       ep_limit = EVICT_PREPARE_LIMIT;
+               ep_count = 0;
+
+               while (!queue_end(&object->memq, (queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
+
+                       p = next_p;
+                       next_p = (vm_page_t)queue_next(&next_p->listq);
+
+                       object->vo_cache_pages_to_scan--;
+
+                       if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry) {
+                               queue_remove(&object->memq, p, vm_page_t, listq);
+                               queue_enter(&object->memq, p, vm_page_t, listq);
+
+                               ep_skipped++;
+                               continue;
+                       }
+                       if (p->wpmapped || p->dirty || p->precious) {
+                               queue_remove(&object->memq, p, vm_page_t, listq);
+                               queue_enter(&object->memq, p, vm_page_t, listq);
+
+                               pmap_clear_reference(p->phys_page);
+                       }
+                       ep_array[ep_count++] = p;
+               }
+               KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
+
+               vm_page_lockspin_queues();
+
+               for (ep_index = 0; ep_index < ep_count; ep_index++) {
+
+                       p = ep_array[ep_index];
+
+                       if (p->wpmapped || p->dirty || p->precious) {
+                               p->reference = FALSE;
+                               p->no_cache = FALSE;
+
+                               /*
+                                * we've already filtered out pages that are in the laundry
+                                * so if we get here, this page can't be on the pageout queue
+                                */
+                               assert(!p->pageout_queue);
+
+                               VM_PAGE_QUEUES_REMOVE(p);
+                               VM_PAGE_ENQUEUE_INACTIVE(p, TRUE);
+
+                               ep_moved++;
+                       } else {
+                               vm_page_free_prepare_queues(p);
+
+                               assert(p->pageq.next == NULL && p->pageq.prev == NULL);
+                               /*
+                                * Add this page to our list of reclaimed pages,
+                                * to be freed later.
+                                */
+                               p->pageq.next = (queue_entry_t) local_free_q;
+                               local_free_q = p;
+
+                               ep_freed++;
+                       }
+               }
+               vm_page_unlock_queues();
+
+               KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
+
+               if (local_free_q) {
+                       vm_page_free_list(local_free_q, TRUE);
+                       local_free_q = VM_PAGE_NULL;
+               }
+               if (object->vo_cache_pages_to_scan == 0) {
+                       KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
+
+                       vm_object_cache_remove(object);
+
+                       KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
+               }
+               /*
+                * done with this object
+                */
+               vm_object_unlock(object);
+               object = VM_OBJECT_NULL;
+
+               /*
+                * at this point, we are not holding any locks
+                */
+               if ((ep_freed + ep_moved) >= num_to_evict) {
+                       /*
+                        * we've reached our target for the
+                        * number of pages to evict
+                        */
+                       break;
+               }
+               vm_object_cache_lock_spin();
+       }
+       /*
+        * put the page queues lock back to the caller's
+        * idea of it 
+        */
+       vm_page_lock_queues();
+
+       vm_object_cache_pages_freed += ep_freed;
+       vm_object_cache_pages_moved += ep_moved;
+       vm_object_cache_pages_skipped += ep_skipped;
+
+       KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
+       return (ep_freed);
+}
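
The eviction loop above is deliberately two-phase: candidate pages are gathered into ep_array (at most EVICT_PREPARE_LIMIT of them) while holding only the object lock, and the page-queue lock is then taken once to free or re-queue the whole batch. A self-contained sketch of that batching shape, assuming a toy array of "pages" rather than the kernel's queues:

    #include <pthread.h>

    #define BATCH_MAX 64                    /* mirrors EVICT_PREPARE_LIMIT */

    static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t queue_lock  = PTHREAD_MUTEX_INITIALIZER;  /* the hot, global lock */

    /* Evict up to 'target' nonzero entries from pages[npages], batching all work
     * that needs the hot lock so it is taken once per batch, not once per page. */
    int evict(int *pages, int npages, int target)
    {
        int freed = 0;

        while (freed < target) {
            int batch[BATCH_MAX], n = 0;

            /* phase 1: pick candidates while holding only the per-object lock */
            pthread_mutex_lock(&object_lock);
            for (int i = 0; i < npages && n < BATCH_MAX; i++)
                if (pages[i] != 0)
                    batch[n++] = i;
            pthread_mutex_unlock(&object_lock);

            if (n == 0)
                break;                      /* nothing left to reclaim */

            /* phase 2: one acquisition of the hot lock disposes of the whole batch */
            pthread_mutex_lock(&queue_lock);
            for (int i = 0; i < n && freed < target; i++) {
                pages[batch[i]] = 0;        /* "free" the page */
                freed++;
            }
            pthread_mutex_unlock(&queue_lock);
        }
        return freed;
    }
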
+
+
 #if VM_OBJECT_CACHE
 /*
  *     Check to see whether we really need to trim
@@ -1231,6 +1624,9 @@ vm_object_terminate(
        object->terminating = TRUE;
        object->alive = FALSE;
 
+       if ( !object->internal && (object->objq.next || object->objq.prev))
+               vm_object_cache_remove(object);
+
        if (object->hashed) {
                lck_mtx_t       *lck;
 
@@ -1342,16 +1738,21 @@ vm_object_reap(
        /*
         * remove from purgeable queue if it's on
         */
-       if (object->objq.next || object->objq.prev) {
+       if (object->internal && (object->objq.next || object->objq.prev)) {
                purgeable_q_t queue = vm_purgeable_object_remove(object);
                assert(queue);
 
-               /* Must take page lock for this - using it to protect token queue */
-               vm_page_lock_queues();
-               vm_purgeable_token_delete_first(queue);
+               if (object->purgeable_when_ripe) {
+                       /*
+                        * Must take page lock for this -
+                        * using it to protect token queue
+                        */
+                       vm_page_lock_queues();
+                       vm_purgeable_token_delete_first(queue);
         
-               assert(queue->debug_count_objects>=0);
-               vm_page_unlock_queues();
+                       assert(queue->debug_count_objects>=0);
+                       vm_page_unlock_queues();
+               }
        }
     
        /*
@@ -1391,7 +1792,7 @@ vm_object_reap(
        vm_object_unlock(object);
 
 #if    MACH_PAGEMAP
-       vm_external_destroy(object->existence_map, object->size);
+       vm_external_destroy(object->existence_map, object->vo_size);
 #endif /* MACH_PAGEMAP */
 
        object->shadow = VM_OBJECT_NULL;
@@ -1405,9 +1806,12 @@ vm_object_reap(
 }
 
 
+unsigned int vm_max_batch = 256;
 
 #define V_O_R_MAX_BATCH 128
 
+#define BATCH_LIMIT(max)       (vm_max_batch >= max ? max : vm_max_batch)
+
 
 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)             \
        MACRO_BEGIN                                                     \
@@ -1438,6 +1842,7 @@ vm_object_reap_pages(
        vm_page_t       local_free_q = VM_PAGE_NULL;
        int             loop_count;
        boolean_t       disconnect_on_release;
+       pmap_flush_context      pmap_flush_context_storage;
 
        if (reap_type == REAP_DATA_FLUSH) {
                /*
@@ -1459,7 +1864,10 @@ vm_object_reap_pages(
 restart_after_sleep:
        if (queue_empty(&object->memq))
                return;
-       loop_count = V_O_R_MAX_BATCH + 1;
+       loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
+
+       if (reap_type == REAP_PURGEABLE)
+               pmap_flush_context_init(&pmap_flush_context_storage);
 
        vm_page_lockspin_queues();
 
@@ -1475,6 +1883,11 @@ restart_after_sleep:
                        vm_page_unlock_queues();
 
                        if (local_free_q) {
+
+                               if (reap_type == REAP_PURGEABLE) {
+                                       pmap_flush(&pmap_flush_context_storage);
+                                       pmap_flush_context_init(&pmap_flush_context_storage);
+                               }
                                /*
                                 * Free the pages we reclaimed so far
                                 * and take a little break to avoid
@@ -1485,40 +1898,13 @@ restart_after_sleep:
                        } else
                                mutex_pause(0);
 
-                       loop_count = V_O_R_MAX_BATCH + 1;
+                       loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
 
                        vm_page_lockspin_queues();
                }
                if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
 
-                       if (reap_type == REAP_DATA_FLUSH && (p->pageout == TRUE && p->list_req_pending == TRUE)) {
-                               p->list_req_pending = FALSE;
-                               p->cleaning = FALSE;
-                               p->pageout = FALSE;
-                               /*
-                                * need to drop the laundry count...
-                                * we may also need to remove it
-                                * from the I/O paging queue...
-                                * vm_pageout_throttle_up handles both cases
-                                *
-                                * the laundry and pageout_queue flags are cleared...
-                                */
-#if CONFIG_EMBEDDED
-                               if (p->laundry) 
-                                       vm_pageout_throttle_up(p);
-#else
-                               vm_pageout_throttle_up(p);
-#endif
-
-                               /*
-                                * toss the wire count we picked up
-                                * when we intially set this page up
-                                * to be cleaned...
-                                */
-                               vm_page_unwire(p);
-                               PAGE_WAKEUP(p);
-
-                       } else if (p->busy || p->cleaning) {
+                       if (p->busy || p->cleaning) {
 
                                vm_page_unlock_queues();
                                /*
@@ -1531,6 +1917,11 @@ restart_after_sleep:
 
                                goto restart_after_sleep;
                        }
+                       if (p->laundry) {
+                               p->pageout = FALSE;
+
+                               vm_pageout_steal_laundry(p, TRUE);
+                       }
                }
                switch (reap_type) {
 
@@ -1548,15 +1939,29 @@ restart_after_sleep:
                        
                case REAP_PURGEABLE:
                        if (VM_PAGE_WIRED(p)) {
-                               /* can't purge a wired page */
+                               /*
+                                * can't purge a wired page
+                                */
                                vm_page_purged_wired++;
                                continue;
                        }
+                       if (p->laundry && !p->busy && !p->cleaning) {
+                               p->pageout = FALSE;
 
+                               vm_pageout_steal_laundry(p, TRUE);
+                       }
+                       if (p->cleaning || p->laundry) {
+                               /*
+                                * page is being acted upon,
+                                * so don't mess with it
+                                */
+                               vm_page_purged_others++;
+                               continue;
+                       }
                        if (p->busy) {
                                /*
                                 * We can't reclaim a busy page but we can
-                                * make it pageable (it's not wired) to make
+                                * make it more likely to be paged (it's not wired) to make
                                 * sure that it gets considered by
                                 * vm_pageout_scan() later.
                                 */
@@ -1565,35 +1970,18 @@ restart_after_sleep:
                                continue;
                        }
 
-                       if (p->cleaning || p->laundry || p->list_req_pending) {
-                               /*
-                                * page is being acted upon,
-                                * so don't mess with it
-                                */
-                               vm_page_purged_others++;
-                               continue;
-                       }
                        assert(p->object != kernel_object);
 
                        /*
                         * we can discard this page...
                         */
                        if (p->pmapped == TRUE) {
-                               int refmod_state;
                                /*
                                 * unmap the page
                                 */
-                               refmod_state = pmap_disconnect(p->phys_page);
-                               if (refmod_state & VM_MEM_MODIFIED) {
-                                       p->dirty = TRUE;
-                               }
-                       }
-                       if (p->dirty || p->precious) {
-                               /*
-                                * we saved the cost of cleaning this page !
-                                */
-                               vm_page_purged_count++;
+                               pmap_disconnect_options(p->phys_page, PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
                        }
+                       vm_page_purged_count++;
 
                        break;
 
@@ -1617,10 +2005,14 @@ restart_after_sleep:
 
                        if ((p->dirty || p->precious) && !p->error && object->alive) {
 
-                               p->busy = TRUE;
-
-                               VM_PAGE_QUEUES_REMOVE(p);
-
+                               if (!p->laundry) {
+                                       VM_PAGE_QUEUES_REMOVE(p);
+                                       /*
+                                        * flush page... page will be freed
+                                        * upon completion of I/O
+                                        */
+                                       vm_pageout_cluster(p, TRUE);
+                               }
                                vm_page_unlock_queues();
                                /*
                                 * free the pages reclaimed so far
@@ -1628,11 +2020,6 @@ restart_after_sleep:
                                VM_OBJ_REAP_FREELIST(local_free_q,
                                                     disconnect_on_release);
 
-                               /*
-                                * flush page... page will be freed
-                                * upon completion of I/O
-                                */
-                               vm_pageout_cluster(p);
                                vm_object_paging_wait(object, THREAD_UNINT);
 
                                goto restart_after_sleep;
@@ -1656,6 +2043,9 @@ restart_after_sleep:
        /*
         * Free the remaining reclaimed pages
         */
+       if (reap_type == REAP_PURGEABLE)
+               pmap_flush(&pmap_flush_context_storage);
+
        VM_OBJ_REAP_FREELIST(local_free_q,
                             disconnect_on_release);
 }
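
The REAP_PURGEABLE path now disconnects each page with PMAP_OPTIONS_NOFLUSH, accumulating the invalidations in a pmap_flush_context, and pays for a single pmap_flush() per batch instead of a TLB flush per page. A simplified sketch of that accumulate-then-flush pattern (the flush_ctx type and helpers are hypothetical, not the kernel's):

    #include <stdbool.h>
    #include <stddef.h>

    #define FLUSH_BATCH 64

    /* Hypothetical accumulator; the real pmap_flush_context batches TLB
     * shootdowns across CPUs. */
    struct flush_ctx {
        unsigned long pending[FLUSH_BATCH];   /* page frames awaiting invalidation */
        size_t        count;
    };

    static void flush_ctx_init(struct flush_ctx *c)
    {
        c->count = 0;
    }

    /* Record an invalidation instead of performing it immediately. */
    static bool flush_ctx_add(struct flush_ctx *c, unsigned long pfn)
    {
        if (c->count == FLUSH_BATCH)
            return false;                     /* caller should flush, then retry */
        c->pending[c->count++] = pfn;
        return true;
    }

    /* One expensive operation (e.g. an inter-processor shootdown) covers the batch. */
    static void flush_ctx_flush(struct flush_ctx *c)
    {
        /* a real pmap would invalidate c->pending[0..count) here */
        c->count = 0;
    }

    /* Typical use mirrors the diff: flush_ctx_init(), flush_ctx_add() per page,
     * flush_ctx_flush() once per freed batch and once more at the end. */
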
@@ -1881,6 +2271,8 @@ vm_object_destroy(
 }
 
 
+#if VM_OBJECT_CACHE
+
 #define VM_OBJ_DEACT_ALL_STATS DEBUG
 #if VM_OBJ_DEACT_ALL_STATS
 uint32_t vm_object_deactivate_all_pages_batches = 0;
@@ -1905,7 +2297,7 @@ vm_object_deactivate_all_pages(
 #endif /* VM_OBJ_DEACT_ALL_STATS */
 #define V_O_D_A_P_MAX_BATCH    256
 
-       loop_count = V_O_D_A_P_MAX_BATCH;
+       loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
 #if VM_OBJ_DEACT_ALL_STATS
        pages_count = 0;
 #endif /* VM_OBJ_DEACT_ALL_STATS */
@@ -1920,7 +2312,7 @@ vm_object_deactivate_all_pages(
                        pages_count = 0;
 #endif /* VM_OBJ_DEACT_ALL_STATS */
                        lck_mtx_yield(&vm_page_queue_lock);
-                       loop_count = V_O_D_A_P_MAX_BATCH;
+                       loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
                }
                if (!p->busy && !p->throttled) {
 #if VM_OBJ_DEACT_ALL_STATS
@@ -1939,133 +2331,7 @@ vm_object_deactivate_all_pages(
 #endif /* VM_OBJ_DEACT_ALL_STATS */
        vm_page_unlock_queues();
 }
-
-
-
-/*
- * when deallocating pages it is necessary to hold 
- * the vm_page_queue_lock (a hot global lock) for certain operations
- * on the page... however, the majority of the work can be done
- * while merely holding the object lock... to mitigate the time spent behind the
- * global lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
- * while doing all of the work that doesn't require the vm_page_queue_lock...
- * them call dw_do_work to acquire the vm_page_queue_lock and do the
- * necessary work for each page... we will grab the busy bit on the page
- * so that dw_do_work can drop the object lock if it can't immediately take the
- * vm_page_queue_lock in order to compete for the locks in the same order that
- * vm_pageout_scan takes them.
- */
-
-#define DELAYED_WORK_LIMIT     32
-
-#define DW_clear_reference     0x01
-#define DW_move_page           0x02
-#define DW_clear_busy          0x04
-#define DW_PAGE_WAKEUP         0x08
-
-
-struct dw {
-       vm_page_t       dw_m;
-       int             dw_mask;
-};
-
-static void dw_do_work(vm_object_t object, struct dw *dwp, int dw_count);
-
-
-static void
-dw_do_work(
-       vm_object_t     object,
-       struct dw       *dwp,
-       int             dw_count)
-{
-       vm_page_t       m;
-       int             j;
-
-       /*
-        * pageout_scan takes the vm_page_lock_queues first
-        * then tries for the object lock... to avoid what
-        * is effectively a lock inversion, we'll go to the
-        * trouble of taking them in that same order... otherwise
-        * if this object contains the majority of the pages resident
-        * in the UBC (or a small set of large objects actively being
-        * worked on contain the majority of the pages), we could
-        * cause the pageout_scan thread to 'starve' in its attempt
-        * to find pages to move to the free queue, since it has to
-        * successfully acquire the object lock of any candidate page
-        * before it can steal/clean it.
-        */
-       if (!vm_page_trylockspin_queues()) {
-               vm_object_unlock(object);
-
-               vm_page_lockspin_queues();
-
-               for (j = 0; ; j++) {
-                       if (!vm_object_lock_avoid(object) &&
-                           _vm_object_lock_try(object))
-                               break;
-                       vm_page_unlock_queues();
-                       mutex_pause(j);
-                       vm_page_lockspin_queues();
-               }
-       }
-       for (j = 0; j < dw_count; j++, dwp++) {
-
-               m = dwp->dw_m;
-
-               if (dwp->dw_mask & DW_clear_reference)
-                       m->reference = FALSE;
-
-               if (dwp->dw_mask & DW_move_page) {
-                       VM_PAGE_QUEUES_REMOVE(m);
-
-                       assert(!m->laundry);
-                       assert(m->object != kernel_object);
-                       assert(m->pageq.next == NULL &&
-                              m->pageq.prev == NULL);
-                                       
-                       if (m->zero_fill) {
-                               queue_enter_first(&vm_page_queue_zf, m, vm_page_t, pageq);
-                               vm_zf_queue_count++;
-                       } else {
-                               queue_enter_first(&vm_page_queue_inactive, m, vm_page_t, pageq);
-                       }
-                       m->inactive = TRUE;
-
-                       if (!m->fictitious) {
-                               vm_page_inactive_count++;
-                               token_new_pagecount++;
-                       } else {
-                               assert(m->phys_page == vm_page_fictitious_addr);
-                       }
-               }
-               if (dwp->dw_mask & DW_clear_busy)
-                       dwp->dw_m->busy = FALSE;
-
-               if (dwp->dw_mask & DW_PAGE_WAKEUP)
-                       PAGE_WAKEUP(dwp->dw_m);
-       }
-       vm_page_unlock_queues();
-
-#if CONFIG_EMBEDDED
-       {
-       int percent_avail;
-
-       /*
-        * Decide if we need to send a memory status notification.
-        */
-       percent_avail = 
-               (vm_page_active_count + vm_page_inactive_count + 
-                vm_page_speculative_count + vm_page_free_count +
-                (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-               atop_64(max_mem);
-       if (percent_avail >= (kern_memorystatus_level + 5) || 
-           percent_avail <= (kern_memorystatus_level - 5)) {
-               kern_memorystatus_level = percent_avail;
-               thread_wakeup((event_t)&kern_memorystatus_wakeup);
-       }
-       }
-#endif
-}
+#endif /* VM_OBJECT_CACHE */
 
 
 
@@ -2117,6 +2383,7 @@ typedef uint64_t  chunk_state_t;
                MARK_PAGE_HANDLED(c, p);                                \
        MACRO_END
 
+
 /*
  * Return true if all pages in the chunk have not yet been processed.
  */
@@ -2169,11 +2436,20 @@ page_is_paged_out(
                        return TRUE;
                }
        } else
-#endif
-               if (object->internal &&
-                  object->alive &&
-                  !object->terminating &&
-                  object->pager_ready) {
+#endif /* MACH_PAGEMAP */
+       if (object->internal &&
+          object->alive &&
+          !object->terminating &&
+          object->pager_ready) {
+
+               if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+                       if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) 
+                           == VM_EXTERNAL_STATE_EXISTS) {
+                               return TRUE;
+                       } else {
+                               return FALSE;
+                       }
+               }
 
                /*
                 * We're already holding a "paging in progress" reference
@@ -2207,6 +2483,21 @@ page_is_paged_out(
 }
 
 
+
+/*
+ * madvise_free_debug
+ *
+ * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
+ * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
+ * simulate the loss of the page's contents as if the page had been
+ * reclaimed and then re-faulted.
+ */
+#if DEVELOPMENT || DEBUG
+int madvise_free_debug = 1;
+#else /* DEBUG */
+int madvise_free_debug = 0;
+#endif /* DEBUG */
+
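
madvise_free_debug makes the kernel zero-fill a page the moment madvise(MADV_FREE*) touches it, so a process that wrongly reads back "freed" memory fails immediately instead of intermittently. For reference, a minimal user-space caller of the interface being exercised, using MADV_FREE as defined in <sys/mman.h>:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 16 * (size_t)getpagesize();
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_ANON | MAP_PRIVATE, -1, 0);
        if (buf == MAP_FAILED) { perror("mmap"); return 1; }

        memset(buf, 0xAB, len);              /* dirty the pages */

        /* Tell the VM system the contents are disposable.  The kernel may reclaim
         * the pages at any point afterwards, so reads may return zeroes -- and with
         * madvise_free_debug enabled they do so immediately. */
        if (madvise(buf, len, MADV_FREE) != 0) { perror("madvise"); return 1; }

        munmap(buf, len);
        return 0;
    }
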
 /*
  * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
  * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
@@ -2220,20 +2511,18 @@ deactivate_pages_in_object(
        vm_object_size_t        size,
        boolean_t               kill_page,
        boolean_t               reusable_page,
-#if !MACH_ASSERT
-       __unused
-#endif
        boolean_t               all_reusable,
-       chunk_state_t           *chunk_state)
+       chunk_state_t           *chunk_state,
+       pmap_flush_context      *pfc)
 {
        vm_page_t       m;
        int             p;
-       struct  dw      dw_array[DELAYED_WORK_LIMIT];
-       struct  dw      *dwp;
+       struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+       struct vm_page_delayed_work     *dwp;
        int             dw_count;
+       int             dw_limit;
        unsigned int    reusable = 0;
 
-
        /*
         * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
         * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
@@ -2243,6 +2532,7 @@ deactivate_pages_in_object(
 
        dwp = &dw_array[0];
        dw_count = 0;
+       dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 
        for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64) {
 
@@ -2269,15 +2559,23 @@ deactivate_pages_in_object(
 
                        MARK_PAGE_HANDLED(*chunk_state, p);
        
-                       if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy)) {
+                       if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) {
                                int     clear_refmod;
        
-                               assert(!m->laundry);
-       
+                               dwp->dw_mask = 0;
+
                                clear_refmod = VM_MEM_REFERENCED;
-                               dwp->dw_mask = DW_clear_reference;
+                               dwp->dw_mask |= DW_clear_reference;
 
                                if ((kill_page) && (object->internal)) {
+                                       if (madvise_free_debug) {
+                                               /*
+                                                * zero-fill the page now
+                                                * to simulate it being
+                                                * reclaimed and re-faulted.
+                                                */
+                                               pmap_zero_page(m->phys_page);
+                                       }
                                        m->precious = FALSE;
                                        m->dirty = FALSE;
 
@@ -2295,6 +2593,8 @@ deactivate_pages_in_object(
 #if    MACH_PAGEMAP
                                        vm_external_state_clr(object->existence_map, offset);
 #endif /* MACH_PAGEMAP */
+                                       VM_COMPRESSOR_PAGER_STATE_CLR(object,
+                                                                     offset);
 
                                        if (reusable_page && !m->reusable) {
                                                assert(!all_reusable);
@@ -2303,39 +2603,25 @@ deactivate_pages_in_object(
                                                object->reusable_page_count++;
                                                assert(object->resident_page_count >= object->reusable_page_count);
                                                reusable++;
-#if CONFIG_EMBEDDED
-                                       } else {
-                                               if (m->reusable) {
-                                                       m->reusable = FALSE;
-                                                       object->reusable_page_count--;
-                                               }
-#endif
                                        }
                                }
-                               pmap_clear_refmod(m->phys_page, clear_refmod);
+                               pmap_clear_refmod_options(m->phys_page, clear_refmod, PMAP_OPTIONS_NOFLUSH, (void *)pfc);
 
                                if (!m->throttled && !(reusable_page || all_reusable))
                                        dwp->dw_mask |= DW_move_page;
-                               /*
-                                * dw_do_work may need to drop the object lock
-                                * if it does, we need the pages its looking at to
-                                * be held stable via the busy bit.
-                                */
-                               m->busy = TRUE;
-                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
-
-                               dwp->dw_m = m;
-                               dwp++;
-                               dw_count++;
+                               
+                               if (dwp->dw_mask)
+                                       VM_PAGE_ADD_DELAYED_WORK(dwp, m,
+                                                                dw_count);
 
-                               if (dw_count >= DELAYED_WORK_LIMIT) {
+                               if (dw_count >= dw_limit) {
                                        if (reusable) {
                                                OSAddAtomic(reusable,
                                                            &vm_page_stats_reusable.reusable_count);
                                                vm_page_stats_reusable.reusable += reusable;
                                                reusable = 0;
                                        }
-                                       dw_do_work(object, &dw_array[0], dw_count);
+                                       vm_page_do_delayed_work(object, &dw_array[0], dw_count);
 
                                        dwp = &dw_array[0];
                                        dw_count = 0;
@@ -2362,6 +2648,8 @@ deactivate_pages_in_object(
 #if    MACH_PAGEMAP
                                        vm_external_state_clr(object->existence_map, offset);
 #endif /* MACH_PAGEMAP */
+                                       VM_COMPRESSOR_PAGER_STATE_CLR(object,
+                                                                     offset);
                                }
                        }
                }
@@ -2374,7 +2662,7 @@ deactivate_pages_in_object(
        }
                
        if (dw_count)
-               dw_do_work(object, &dw_array[0], dw_count);
+               vm_page_do_delayed_work(object, &dw_array[0], dw_count);
 }
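
deactivate_pages_in_object() now queues per-page actions as vm_page_delayed_work entries and calls vm_page_do_delayed_work() whenever the batch reaches dw_limit, so the hot page-queue lock is taken once per batch rather than once per page. A compact sketch of that pattern, with simplified page and work-entry types (DW_CLEAR_REFERENCE / DW_MOVE_PAGE here are illustrative stand-ins for the kernel's DW_* masks):

    #include <stddef.h>

    #define DW_CLEAR_REFERENCE 0x01
    #define DW_MOVE_PAGE       0x02
    #define DW_LIMIT           32          /* flush the batch when it fills */

    struct page { int referenced; int queue; };

    struct delayed_work { struct page *m; int mask; };

    /* Apply every queued action in one pass; in the kernel this is the only part
     * that runs with the page-queue lock held. */
    static void do_delayed_work(struct delayed_work *dw, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (dw[i].mask & DW_CLEAR_REFERENCE) dw[i].m->referenced = 0;
            if (dw[i].mask & DW_MOVE_PAGE)       dw[i].m->queue = 0;  /* "inactive" */
        }
    }

    void deactivate(struct page *pages, size_t npages)
    {
        struct delayed_work dw[DW_LIMIT];
        size_t dw_count = 0;

        for (size_t i = 0; i < npages; i++) {
            int mask = DW_CLEAR_REFERENCE;
            if (pages[i].queue != 0)
                mask |= DW_MOVE_PAGE;

            dw[dw_count].m = &pages[i];
            dw[dw_count].mask = mask;

            if (++dw_count >= DW_LIMIT) {      /* batch full: flush it */
                do_delayed_work(dw, dw_count);
                dw_count = 0;
            }
        }
        if (dw_count)                          /* flush the tail of the batch */
            do_delayed_work(dw, dw_count);
    }
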
 
 
@@ -2394,7 +2682,8 @@ deactivate_a_chunk(
        vm_object_size_t        size,
        boolean_t               kill_page,
        boolean_t               reusable_page,
-       boolean_t               all_reusable)
+       boolean_t               all_reusable,
+       pmap_flush_context      *pfc)
 {
        vm_object_t             object;
        vm_object_t             tmp_object;
@@ -2427,7 +2716,7 @@ deactivate_a_chunk(
        while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
                vm_object_paging_begin(object);
 
-               deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state);
+               deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc);
 
                vm_object_paging_end(object);
 
@@ -2443,7 +2732,7 @@ deactivate_a_chunk(
                        kill_page = FALSE;
                        reusable_page = FALSE;
                        all_reusable = FALSE;
-                       offset += object->shadow_offset;
+                       offset += object->vo_shadow_offset;
                        vm_object_lock(tmp_object);
                }
 
@@ -2477,6 +2766,7 @@ vm_object_deactivate_pages(
 {
        vm_object_size_t        length;
        boolean_t               all_reusable;
+       pmap_flush_context      pmap_flush_context_storage;
 
        /*
         * We break the range up into chunks and do one chunk at a time.  This is for
@@ -2488,27 +2778,29 @@ vm_object_deactivate_pages(
 
        all_reusable = FALSE;
        if (reusable_page &&
-           object->size != 0 &&
-           object->size == size &&
+           object->internal &&
+           object->vo_size != 0 &&
+           object->vo_size == size &&
            object->reusable_page_count == 0) {
                all_reusable = TRUE;
                reusable_page = FALSE;
        }
 
-#if CONFIG_EMBEDDED
        if ((reusable_page || all_reusable) && object->all_reusable) {
                /* This means MADV_FREE_REUSABLE has been called twice, which 
                 * is probably illegal. */
                return;
        }
-#endif
+
+       pmap_flush_context_init(&pmap_flush_context_storage);
 
        while (size) {
-               length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable);
+               length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage);
 
                size -= length;
                offset += length;
        }
+       pmap_flush(&pmap_flush_context_storage);
 
        if (all_reusable) {
                if (!object->all_reusable) {
@@ -2560,7 +2852,7 @@ vm_object_reuse_pages(
        if (object->all_reusable) {
                assert(object->reusable_page_count == 0);
                object->all_reusable = FALSE;
-               if (end_offset - start_offset == object->size ||
+               if (end_offset - start_offset == object->vo_size ||
                    !allow_partial_reuse) {
                        vm_page_stats_reusable.all_reuse_calls++;
                        reused = object->resident_page_count;
@@ -2644,8 +2936,25 @@ vm_object_pmap_protect(
        vm_map_offset_t                 pmap_start,
        vm_prot_t                       prot)
 {
+       vm_object_pmap_protect_options(object, offset, size,
+                                      pmap, pmap_start, prot, 0);
+}
+
+__private_extern__ void
+vm_object_pmap_protect_options(
+       register vm_object_t            object,
+       register vm_object_offset_t     offset,
+       vm_object_size_t                size,
+       pmap_t                          pmap,
+       vm_map_offset_t                 pmap_start,
+       vm_prot_t                       prot,
+       int                             options)
+{
+       pmap_flush_context      pmap_flush_context_storage;
+       boolean_t               delayed_pmap_flush = FALSE;
+
        if (object == VM_OBJECT_NULL)
-           return;
+               return;
        size = vm_object_round_page(size);
        offset = vm_object_trunc_page(offset);
 
@@ -2654,21 +2963,36 @@ vm_object_pmap_protect(
        if (object->phys_contiguous) {
                if (pmap != NULL) {
                        vm_object_unlock(object);
-                       pmap_protect(pmap, pmap_start, pmap_start + size, prot);
+                       pmap_protect_options(pmap,
+                                            pmap_start,
+                                            pmap_start + size,
+                                            prot,
+                                            options & ~PMAP_OPTIONS_NOFLUSH,
+                                            NULL);
                } else {
                        vm_object_offset_t phys_start, phys_end, phys_addr;
 
-                       phys_start = object->shadow_offset + offset;
+                       phys_start = object->vo_shadow_offset + offset;
                        phys_end = phys_start + size;
                        assert(phys_start <= phys_end);
-                       assert(phys_end <= object->shadow_offset + object->size);
+                       assert(phys_end <= object->vo_shadow_offset + object->vo_size);
                        vm_object_unlock(object);
 
+                       pmap_flush_context_init(&pmap_flush_context_storage);
+                       delayed_pmap_flush = FALSE;
+
                        for (phys_addr = phys_start;
                             phys_addr < phys_end;
                             phys_addr += PAGE_SIZE_64) {
-                               pmap_page_protect((ppnum_t) (phys_addr >> PAGE_SHIFT), prot);
+                               pmap_page_protect_options(
+                                       (ppnum_t) (phys_addr >> PAGE_SHIFT),
+                                       prot,
+                                       options | PMAP_OPTIONS_NOFLUSH,
+                                       (void *)&pmap_flush_context_storage);
+                               delayed_pmap_flush = TRUE;
                        }
+                       if (delayed_pmap_flush == TRUE)
+                               pmap_flush(&pmap_flush_context_storage);
                }
                return;
        }
@@ -2678,38 +3002,49 @@ vm_object_pmap_protect(
        while (TRUE) {
           if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
                vm_object_unlock(object);
-               pmap_protect(pmap, pmap_start, pmap_start + size, prot);
+               pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
+                                    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
                return;
            }
 
-           /* if we are doing large ranges with respect to resident */
-           /* page count then we should interate over pages otherwise */
-           /* inverse page look-up will be faster */
+          pmap_flush_context_init(&pmap_flush_context_storage);
+          delayed_pmap_flush = FALSE;
+
+           /*
+            * if we are doing large ranges with respect to resident
+            * page count then we should iterate over pages otherwise
+            * inverse page look-up will be faster
+            */
            if (ptoa_64(object->resident_page_count / 4) <  size) {
                vm_page_t               p;
                vm_object_offset_t      end;
 
                end = offset + size;
 
-               if (pmap != PMAP_NULL) {
-                 queue_iterate(&object->memq, p, vm_page_t, listq) {
-                   if (!p->fictitious &&
-                       (offset <= p->offset) && (p->offset < end)) {
-                       vm_map_offset_t start;
-
-                       start = pmap_start + p->offset - offset;
-                       pmap_protect(pmap, start, start + PAGE_SIZE_64, prot);
-                   }
-                 }
-               } else {
-                 queue_iterate(&object->memq, p, vm_page_t, listq) {
-                   if (!p->fictitious &&
-                       (offset <= p->offset) && (p->offset < end)) {
-
-                       pmap_page_protect(p->phys_page, prot);
-                   }
-                 }
+               queue_iterate(&object->memq, p, vm_page_t, listq) {
+                       if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
+                               vm_map_offset_t start;
+
+                               start = pmap_start + p->offset - offset;
+
+                               if (pmap != PMAP_NULL)
+                                       pmap_protect_options(
+                                               pmap,
+                                               start,
+                                               start + PAGE_SIZE_64,
+                                               prot,
+                                               options | PMAP_OPTIONS_NOFLUSH,
+                                               &pmap_flush_context_storage);
+                               else
+                                       pmap_page_protect_options(
+                                               p->phys_page,
+                                               prot,
+                                               options | PMAP_OPTIONS_NOFLUSH,
+                                               &pmap_flush_context_storage);
+                               delayed_pmap_flush = TRUE;
+                       }
                }
+
           } else {
                vm_page_t               p;
                vm_object_offset_t      end;
@@ -2717,29 +3052,36 @@ vm_object_pmap_protect(
 
                end = offset + size;
 
-               if (pmap != PMAP_NULL) {
-                       for(target_off = offset; 
-                           target_off < end;
-                           target_off += PAGE_SIZE) {
-                               p = vm_page_lookup(object, target_off);
-                               if (p != VM_PAGE_NULL) {
-                                       vm_object_offset_t start;
-                                       start = pmap_start + 
-                                               (p->offset - offset);
-                                       pmap_protect(pmap, start, 
-                                                    start + PAGE_SIZE, prot);
-                               }
-                       }
-               } else {
-                       for(target_off = offset; 
-                               target_off < end; target_off += PAGE_SIZE) {
-                               p = vm_page_lookup(object, target_off);
-                               if (p != VM_PAGE_NULL) {
-                                       pmap_page_protect(p->phys_page, prot);
-                               }
+               for (target_off = offset; 
+                    target_off < end; target_off += PAGE_SIZE) {
+
+                       p = vm_page_lookup(object, target_off);
+
+                       if (p != VM_PAGE_NULL) {
+                               vm_object_offset_t start;
+
+                               start = pmap_start + (p->offset - offset);
+
+                               if (pmap != PMAP_NULL)
+                                       pmap_protect_options(
+                                               pmap,
+                                               start,
+                                               start + PAGE_SIZE_64,
+                                               prot,
+                                               options | PMAP_OPTIONS_NOFLUSH,
+                                               &pmap_flush_context_storage);
+                               else
+                                       pmap_page_protect_options(
+                                               p->phys_page,
+                                               prot,
+                                               options | PMAP_OPTIONS_NOFLUSH,
+                                               &pmap_flush_context_storage);
+                               delayed_pmap_flush = TRUE;
                        }
                }
-         }
+           }
+           if (delayed_pmap_flush == TRUE)
+                   pmap_flush(&pmap_flush_context_storage);
 
            if (prot == VM_PROT_NONE) {
                /*
@@ -2750,7 +3092,7 @@ vm_object_pmap_protect(
 
                next_object = object->shadow;
                if (next_object != VM_OBJECT_NULL) {
-                   offset += object->shadow_offset;
+                   offset += object->vo_shadow_offset;
                    vm_object_lock(next_object);
                    vm_object_unlock(object);
                    object = next_object;
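Note: the hunks above also keep the earlier heuristic for choosing a traversal strategy: when the range being protected is large relative to the object's resident page count, iterating the resident-page queue touches fewer entries than probing every offset with vm_page_lookup(). A small sketch of that decision, assuming a hypothetical helper and 4 KB pages:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    /* Hypothetical helper mirroring the check
     * "ptoa_64(object->resident_page_count / 4) < size". */
    static bool walk_resident_queue(unsigned resident_page_count,
                                    unsigned long long size)
    {
            return (unsigned long long)(resident_page_count / 4) * PAGE_SIZE < size;
    }

    int main(void)
    {
            /* 1000 resident pages vs a 16 MB range: walk the queue (prints 1). */
            printf("%d\n", walk_resident_queue(1000, 16ULL << 20));
            /* 1000 resident pages vs a 64 KB range: per-offset lookups (prints 0). */
            printf("%d\n", walk_resident_queue(1000, 64ULL << 10));
            return 0;
    }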
@@ -2854,6 +3196,10 @@ vm_object_copy_slowly(
        fault_info.hi_offset = src_offset + size;
        fault_info.no_cache  = FALSE;
        fault_info.stealth = TRUE;
+       fault_info.io_sync = FALSE;
+       fault_info.cs_bypass = FALSE;
+       fault_info.mark_zf_absent = FALSE;
+       fault_info.batch_pmap_op = FALSE;
 
        for ( ;
            size != 0 ;
@@ -2900,8 +3246,10 @@ vm_object_copy_slowly(
                        }
 
                        XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
+                       _result_page = VM_PAGE_NULL;
                        result = vm_fault_page(src_object, src_offset,
                                VM_PROT_READ, FALSE,
+                               FALSE, /* page not looked up */
                                &prot, &_result_page, &top_page,
                                (int *)0,
                                &error_code, FALSE, FALSE, &fault_info);
@@ -2911,11 +3259,6 @@ vm_object_copy_slowly(
                                result_page = _result_page;
 
                                /*
-                                *      We don't need to hold the object
-                                *      lock -- the busy page will be enough.
-                                *      [We don't care about picking up any
-                                *      new modifications.]
-                                *
                                 *      Copy the page to the new object.
                                 *
                                 *      POLICY DECISION:
@@ -2924,15 +3267,15 @@ vm_object_copy_slowly(
                                 *              of copying.
                                 */
 
-                               vm_object_unlock(result_page->object);
                                vm_page_copy(result_page, new_page);
+                               vm_object_unlock(result_page->object);
 
                                /*
                                 *      Let go of both pages (make them
                                 *      not busy, perform wakeup, activate).
                                 */
                                vm_object_lock(new_object);
-                               new_page->dirty = TRUE;
+                               SET_PAGE_DIRTY(new_page, FALSE);
                                PAGE_WAKEUP_DONE(new_page);
                                vm_object_unlock(new_object);
 
@@ -2960,10 +3303,6 @@ vm_object_copy_slowly(
                        case VM_FAULT_RETRY:
                                break;
 
-                       case VM_FAULT_FICTITIOUS_SHORTAGE:
-                               vm_page_more_fictitious();
-                               break;
-
                        case VM_FAULT_MEMORY_SHORTAGE:
                                if (vm_page_wait(interruptible))
                                        break;
@@ -3193,8 +3532,8 @@ Retry:
                vm_object_lock(src_object);
                goto Retry;
        }
-       if (copy->size < src_offset+size)
-               copy->size = src_offset+size;
+       if (copy->vo_size < src_offset+size)
+               copy->vo_size = src_offset+size;
 
        if (!copy->pager_ready)
                check_ready = TRUE;
@@ -3248,6 +3587,8 @@ vm_object_copy_delayed(
        vm_object_t             old_copy;
        vm_page_t               p;
        vm_object_size_t        copy_size = src_offset + size;
+       pmap_flush_context      pmap_flush_context_storage;
+       boolean_t               delayed_pmap_flush = FALSE;
 
 
        int collisions = 0;
@@ -3360,7 +3701,7 @@ vm_object_copy_delayed(
                         *      needed).
                         */
 
-                       if (old_copy->size < copy_size) {
+                       if (old_copy->vo_size < copy_size) {
                                if (src_object_shared == TRUE) {
                                        vm_object_unlock(old_copy);
                                        vm_object_unlock(src_object);
@@ -3378,9 +3719,12 @@ vm_object_copy_delayed(
                                 */
                                copy_delayed_protect_iterate++;
 
+                               pmap_flush_context_init(&pmap_flush_context_storage);
+                               delayed_pmap_flush = FALSE;
+
                                queue_iterate(&src_object->memq, p, vm_page_t, listq) {
                                        if (!p->fictitious && 
-                                           p->offset >= old_copy->size && 
+                                           p->offset >= old_copy->vo_size && 
                                            p->offset < copy_size) {
                                                if (VM_PAGE_WIRED(p)) {
                                                        vm_object_unlock(old_copy);
@@ -3390,15 +3734,21 @@ vm_object_copy_delayed(
                                                                vm_object_unlock(new_copy);
                                                                vm_object_deallocate(new_copy);
                                                        }
+                                                       if (delayed_pmap_flush == TRUE)
+                                                               pmap_flush(&pmap_flush_context_storage);
 
                                                        return VM_OBJECT_NULL;
                                                } else {
-                                                       pmap_page_protect(p->phys_page, 
-                                                                         (VM_PROT_ALL & ~VM_PROT_WRITE));
+                                                       pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE),
+                                                                                 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+                                                       delayed_pmap_flush = TRUE;
                                                }
                                        }
                                }
-                               old_copy->size = copy_size;
+                               if (delayed_pmap_flush == TRUE)
+                                       pmap_flush(&pmap_flush_context_storage);
+
+                               old_copy->vo_size = copy_size;
                        }
                        if (src_object_shared == TRUE)
                                vm_object_reference_shared(old_copy);
@@ -3421,8 +3771,8 @@ vm_object_copy_delayed(
                 * copy object will be large enough to back either the
                 * old copy object or the new mapping.
                 */
-               if (old_copy->size > copy_size)
-                       copy_size = old_copy->size;
+               if (old_copy->vo_size > copy_size)
+                       copy_size = old_copy->vo_size;
 
                if (new_copy == VM_OBJECT_NULL) {
                        vm_object_unlock(old_copy);
@@ -3434,7 +3784,7 @@ vm_object_copy_delayed(
                        src_object_shared = FALSE;
                        goto Retry;
                }
-               new_copy->size = copy_size;     
+               new_copy->vo_size = copy_size;  
 
                /*
                 *      The copy-object is always made large enough to
@@ -3444,7 +3794,7 @@ vm_object_copy_delayed(
                 */
 
                assert((old_copy->shadow == src_object) &&
-                   (old_copy->shadow_offset == (vm_object_offset_t) 0));
+                   (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
 
        } else if (new_copy == VM_OBJECT_NULL) {
                vm_object_unlock(src_object);
@@ -3469,6 +3819,9 @@ vm_object_copy_delayed(
         */
        copy_delayed_protect_iterate++;
 
+       pmap_flush_context_init(&pmap_flush_context_storage);
+       delayed_pmap_flush = FALSE;
+
        queue_iterate(&src_object->memq, p, vm_page_t, listq) {
                if (!p->fictitious && p->offset < copy_size) {
                        if (VM_PAGE_WIRED(p)) {
@@ -3477,13 +3830,21 @@ vm_object_copy_delayed(
                                vm_object_unlock(src_object);
                                vm_object_unlock(new_copy);
                                vm_object_deallocate(new_copy);
+
+                               if (delayed_pmap_flush == TRUE)
+                                       pmap_flush(&pmap_flush_context_storage);
+
                                return VM_OBJECT_NULL;
                        } else {
-                               pmap_page_protect(p->phys_page, 
-                                                 (VM_PROT_ALL & ~VM_PROT_WRITE));
+                               pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE),
+                                                         PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+                               delayed_pmap_flush = TRUE;
                        }
                }
        }
+       if (delayed_pmap_flush == TRUE)
+               pmap_flush(&pmap_flush_context_storage);
+
        if (old_copy != VM_OBJECT_NULL) {
                /*
                 *      Make the old copy-object shadow the new one.
@@ -3516,7 +3877,7 @@ vm_object_copy_delayed(
         */
        vm_object_lock_assert_exclusive(new_copy);
        new_copy->shadow = src_object;
-       new_copy->shadow_offset = 0;
+       new_copy->vo_shadow_offset = 0;
        new_copy->shadowed = TRUE;      /* caller must set needs_copy */
 
        vm_object_lock_assert_exclusive(src_object);
@@ -3648,7 +4009,7 @@ vm_object_copy_strategically(
  *     The new object and offset into that object
  *     are returned in the source parameters.
  */
-boolean_t vm_object_shadow_check = FALSE;
+boolean_t vm_object_shadow_check = TRUE;
 
 __private_extern__ boolean_t
 vm_object_shadow(
@@ -3660,6 +4021,10 @@ vm_object_shadow(
        register vm_object_t    result;
 
        source = *object;
+       assert(source != VM_OBJECT_NULL);
+       if (source == VM_OBJECT_NULL)
+               return FALSE;
+
 #if 0
        /*
         * XXX FBDP
@@ -3679,11 +4044,19 @@ vm_object_shadow(
 
        /*
         *      Determine if we really need a shadow.
+        *
+        *      If the source object is larger than what we are trying
+        *      to create, then force the shadow creation even if the
+        *      ref count is 1.  This will allow us to [potentially]
+        *      collapse the underlying object away in the future
+        *      (freeing up the extra data it might contain and that
+        *      we don't need).
         */
-
-       if (vm_object_shadow_check && source->ref_count == 1 &&
+       if (vm_object_shadow_check &&
+           source->vo_size == length &&
+           source->ref_count == 1 &&
            (source->shadow == VM_OBJECT_NULL ||
-            source->shadow->copy == VM_OBJECT_NULL))
+            source->shadow->copy == VM_OBJECT_NULL) )
        {
                source->shadowed = FALSE;
                return FALSE;
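Note: with vm_object_shadow_check now enabled by default, the shortcut above only reuses the source object when it is exactly the requested length, solely referenced, and not already backing a copy; an oversized source gets a shadow so the excess can later be collapsed away. A compact sketch of that predicate, with a trimmed-down illustrative struct and the global check flag assumed TRUE:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative only: just the fields the test reads. */
    struct obj {
            unsigned long long      vo_size;
            int                     ref_count;
            struct obj              *shadow;
            struct obj              *copy;
    };

    /* TRUE means "no shadow needed": the caller keeps using 'source' directly. */
    static bool shadow_not_needed(const struct obj *source, unsigned long long length)
    {
            return source->vo_size == length &&        /* nothing extra to collapse away later */
                   source->ref_count == 1 &&           /* we hold the only reference */
                   (source->shadow == NULL ||
                    source->shadow->copy == NULL);     /* not part of a copy chain */
    }

    int main(void)
    {
            struct obj src = { .vo_size = 8192, .ref_count = 1, .shadow = NULL, .copy = NULL };
            return shadow_not_needed(&src, 4096) ? 0 : 1;  /* oversized source: shadow needed, exits 1 */
    }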
@@ -3710,7 +4083,7 @@ vm_object_shadow(
         *      and fix up the offset into the new object.
         */
 
-       result->shadow_offset = *offset;
+       result->vo_shadow_offset = *offset;
 
        /*
         *      Return the new things
@@ -4083,21 +4456,23 @@ vm_object_pager_create(
        object->paging_offset = 0;
                
 #if    MACH_PAGEMAP
-       size = object->size;
+       size = object->vo_size;
 #endif /* MACH_PAGEMAP */
        vm_object_unlock(object);
 
 #if    MACH_PAGEMAP
-       map = vm_external_create(size);
-       vm_object_lock(object);
-       assert(object->size == size);
-       object->existence_map = map;
-       vm_object_unlock(object);
+       if (DEFAULT_PAGER_IS_ACTIVE) {
+               map = vm_external_create(size);
+               vm_object_lock(object);
+               assert(object->vo_size == size);
+               object->existence_map = map;
+               vm_object_unlock(object);
+       }
 #endif /* MACH_PAGEMAP */
 
-       if ((uint32_t) object->size != object->size) {
+       if ((uint32_t) object->vo_size != object->vo_size) {
                panic("vm_object_pager_create(): object size 0x%llx >= 4GB\n",
-                     (uint64_t) object->size);
+                     (uint64_t) object->vo_size);
        }
 
        /*
@@ -4116,8 +4491,8 @@ vm_object_pager_create(
                assert(object->temporary);
 
                /* create our new memory object */
-               assert((vm_size_t) object->size == object->size);
-               (void) memory_object_create(dmm, (vm_size_t) object->size,
+               assert((vm_size_t) object->vo_size == object->vo_size);
+               (void) memory_object_create(dmm, (vm_size_t) object->vo_size,
                                            &pager);
 
                memory_object_default_deallocate(dmm);
@@ -4135,7 +4510,7 @@ vm_object_pager_create(
         *      copied by vm_object_enter().
         */
 
-       if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object)
+       if (vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE) != object)
                panic("vm_object_pager_create: mismatch");
 
        /*
@@ -4151,6 +4526,105 @@ vm_object_pager_create(
        vm_object_paging_end(object);
 }
 
+void
+vm_object_compressor_pager_create(
+       register vm_object_t    object)
+{
+       memory_object_t         pager;
+       vm_object_hash_entry_t  entry;
+       lck_mtx_t               *lck;
+
+       assert(object != kernel_object);
+
+       /*
+        *      Prevent collapse or termination by holding a paging reference
+        */
+
+       vm_object_paging_begin(object);
+       if (object->pager_created) {
+               /*
+                *      Someone else got to it first...
+                *      wait for them to finish initializing the ports
+                */
+               while (!object->pager_initialized) {
+                       vm_object_sleep(object,
+                                       VM_OBJECT_EVENT_INITIALIZED,
+                                       THREAD_UNINT);
+               }
+               vm_object_paging_end(object);
+               return;
+       }
+
+       /*
+        *      Indicate that a memory object has been assigned
+        *      before dropping the lock, to prevent a race.
+        */
+
+       object->pager_created = TRUE;
+       object->paging_offset = 0;
+               
+       vm_object_unlock(object);
+
+       if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
+           (object->vo_size/PAGE_SIZE)) {
+               panic("vm_object_compressor_pager_create(%p): "
+                     "object size 0x%llx >= 0x%llx\n",
+                     object,
+                     (uint64_t) object->vo_size,
+                     0x0FFFFFFFFULL*PAGE_SIZE);
+       }
+
+       /*
+        *      Create the [internal] pager, and associate it with this object.
+        *
+        *      We make the association here so that vm_object_enter()
+        *      can look up the object to complete initializing it.  No
+        *      user will ever map this object.
+        */
+       {
+               assert(object->temporary);
+
+               /* create our new memory object */
+               assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
+                      (object->vo_size/PAGE_SIZE));
+               (void) compressor_memory_object_create(
+                       (memory_object_size_t) object->vo_size,
+                       &pager);
+               if (pager == NULL) {
+                       panic("vm_object_compressor_pager_create(): "
+                             "no pager for object %p size 0x%llx\n",
+                             object, (uint64_t) object->vo_size);
+               }
+       }
+
+       entry = vm_object_hash_entry_alloc(pager);
+
+       lck = vm_object_hash_lock_spin(pager);
+       vm_object_hash_insert(entry, object);
+       vm_object_hash_unlock(lck);
+
+       /*
+        *      A reference was returned by
+        *      memory_object_create(), and it is
+        *      copied by vm_object_enter().
+        */
+
+       if (vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE) != object)
+               panic("vm_object_compressor_pager_create: mismatch");
+
+       /*
+        *      Drop the reference we were passed.
+        */
+       memory_object_deallocate(pager);
+
+       vm_object_lock(object);
+
+       /*
+        *      Release the paging reference
+        */
+       vm_object_paging_end(object);
+}
+
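Note: the panic guard in the new function rejects objects whose page count does not fit in 32 bits, which is what the (uint32_t) truncation comparison detects. A tiny sketch of that check in isolation, with a hypothetical helper and 4 KB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    /* Hypothetical helper: TRUE when vo_size/PAGE_SIZE survives truncation to 32 bits. */
    static int page_count_fits_32bits(uint64_t vo_size)
    {
            return (uint32_t)(vo_size / PAGE_SIZE) == (vo_size / PAGE_SIZE);
    }

    int main(void)
    {
            printf("%d\n", page_count_fits_32bits(1ULL << 30));                /* 1 GB object: 1 */
            printf("%d\n", page_count_fits_32bits((1ULL << 32) * PAGE_SIZE));  /* 2^32 pages: 0 */
            return 0;
    }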
 /*
  *     Routine:        vm_object_remove
  *     Purpose:
@@ -4217,8 +4691,8 @@ vm_object_do_collapse(
        vm_object_lock_assert_exclusive(object);
        vm_object_lock_assert_exclusive(backing_object);
 
-       backing_offset = object->shadow_offset;
-       size = object->size;
+       backing_offset = object->vo_shadow_offset;
+       size = object->vo_size;
 
        /*
         *      Move all in-memory pages from backing_object
@@ -4309,6 +4783,14 @@ vm_object_do_collapse(
        if (backing_object->pager != MEMORY_OBJECT_NULL) {
                vm_object_hash_entry_t  entry;
 
+#if 00
+               if (COMPRESSED_PAGER_IS_ACTIVE) {
+                       panic("vm_object_do_collapse(%p,%p): "
+                             "backing_object has a compressor pager",
+                             object, backing_object);
+               }
+#endif
+
                /*
                 *      Move the pager from backing_object to object.
                 *
@@ -4358,10 +4840,10 @@ vm_object_do_collapse(
         *      this code should be fixed to salvage the map.
         */
        assert(object->existence_map == VM_EXTERNAL_NULL);
-       if (backing_offset || (size != backing_object->size)) {
+       if (backing_offset || (size != backing_object->vo_size)) {
                vm_external_discarded++;
                vm_external_destroy(backing_object->existence_map,
-                       backing_object->size);
+                       backing_object->vo_size);
        }
        else {
                vm_external_collapsed++;
@@ -4380,10 +4862,10 @@ vm_object_do_collapse(
        assert(!backing_object->phys_contiguous);
        object->shadow = backing_object->shadow;
        if (object->shadow) {
-               object->shadow_offset += backing_object->shadow_offset;
+               object->vo_shadow_offset += backing_object->vo_shadow_offset;
        } else {
                /* no shadow, therefore no shadow offset... */
-               object->shadow_offset = 0;
+               object->vo_shadow_offset = 0;
        }
        assert((object->shadow == VM_OBJECT_NULL) ||
               (object->shadow->copy != backing_object));
@@ -4451,10 +4933,10 @@ vm_object_do_bypass(
        assert(!backing_object->phys_contiguous);
        object->shadow = backing_object->shadow;
        if (object->shadow) {
-               object->shadow_offset += backing_object->shadow_offset;
+               object->vo_shadow_offset += backing_object->vo_shadow_offset;
        } else {
                /* no shadow, therefore no shadow offset... */
-               object->shadow_offset = 0;
+               object->vo_shadow_offset = 0;
        }
        
        /*
@@ -4512,7 +4994,19 @@ vm_object_do_bypass(
                        vm_object_res_reference(backing_object);
                }
 #endif /* TASK_SWAPPER */
+               /*
+                * vm_object_collapse (the caller of this function) is
+                * now called from contexts that may not guarantee that a
+                * valid reference is held on the object... w/o a valid
+                * reference, it is unsafe and unwise (you will definitely
+                * regret it) to unlock the object and then retake the lock
+                * since the object may be terminated and recycled in between.
+                * The "activity_in_progress" reference will keep the object
+                * 'stable'.
+                */
+               vm_object_activity_begin(object);
                vm_object_unlock(object);
+
                vm_object_unlock(backing_object);
                vm_object_deallocate(backing_object);
 
@@ -4524,6 +5018,7 @@ vm_object_do_bypass(
                 */
 
                vm_object_lock(object);
+               vm_object_activity_end(object);
        }
        
        object_bypasses++;
@@ -4544,7 +5039,7 @@ static unsigned long vm_object_collapse_calls = 0;
 static unsigned long vm_object_collapse_objects = 0;
 static unsigned long vm_object_collapse_do_collapse = 0;
 static unsigned long vm_object_collapse_do_bypass = 0;
-static unsigned long vm_object_collapse_delays = 0;
+
 __private_extern__ void
 vm_object_collapse(
        register vm_object_t                    object,
@@ -4690,7 +5185,7 @@ retry:
                if (backing_object->ref_count == 1 &&
                    (!object->pager_created 
 #if    !MACH_PAGEMAP
-                    || !backing_object->pager_created
+                    || (!backing_object->pager_created)
 #endif /*!MACH_PAGEMAP */
                    ) && vm_object_collapse_allowed) {
 
@@ -4757,13 +5252,13 @@ retry:
                 *      we have to make sure no pages in the backing object
                 *      "show through" before bypassing it.
                 */
-               size = atop(object->size);
+               size = (unsigned int)atop(object->vo_size);
                rcount = object->resident_page_count;
+
                if (rcount != size) {
                        vm_object_offset_t      offset;
                        vm_object_offset_t      backing_offset;
                        unsigned int            backing_rcount;
-                       unsigned int            lookups = 0;
 
                        /*
                         *      If the backing object has a pager but no pagemap,
@@ -4803,6 +5298,24 @@ retry:
                                continue;
                        }
 
+                       backing_offset = object->vo_shadow_offset;
+                       backing_rcount = backing_object->resident_page_count;
+
+                       if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
+                                /*
+                                * we have enough pages in the backing object to guarantee that
+                                * at least 1 of them must be 'uncovered' by a resident page
+                                * in the object we're evaluating, so move on and
+                                * try to collapse the rest of the shadow chain
+                                */
+                               if (object != original_object) {
+                                       vm_object_unlock(object);
+                               }
+                               object = backing_object;
+                               object_lock_type = backing_object_lock_type;
+                               continue;
+                       }
+
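Note: the new early-exit above is a pigeonhole argument. At most atop(backing_object->vo_size) - size of the backing object's resident pages can lie outside the range this object shadows, and at most rcount of the in-range ones can be covered by the object's own resident pages; any resident backing pages beyond that sum must show through, so a bypass is impossible and the walk simply moves down the chain. A small worked instance of the inequality, assuming a hypothetical helper:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical helper restating the test: TRUE means at least one backing
     * page must show through, so there is no point checking page by page. */
    static bool backing_page_must_show_through(int backing_rcount,
                                               int backing_size_in_pages,
                                               int size_in_pages,   /* object's size in pages */
                                               int rcount)          /* object's resident pages */
    {
            return backing_rcount - (backing_size_in_pages - size_in_pages) > rcount;
    }

    int main(void)
    {
            /* Backing object: 200 pages, 150 resident.  Object: shadows 100 of
             * those pages and has 40 resident pages of its own.  At most
             * 200 - 100 = 100 backing pages lie outside the shadowed range, so
             * at least 150 - 100 = 50 resident backing pages are inside it;
             * only 40 can be covered, so some must show through -> prints 1. */
            printf("%d\n", backing_page_must_show_through(150, 200, 100, 40));

            /* With only 120 resident backing pages the bound is 20 <= 40 ->
             * prints 0, and the per-page check below is still needed. */
            printf("%d\n", backing_page_must_show_through(120, 200, 100, 40));
            return 0;
    }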
                        /*
                         *      If all of the pages in the backing object are
                         *      shadowed by the parent object, the parent
@@ -4816,17 +5329,19 @@ retry:
                         *
                         */
 
-                       backing_offset = object->shadow_offset;
-                       backing_rcount = backing_object->resident_page_count;
-
 #if    MACH_PAGEMAP
 #define EXISTS_IN_OBJECT(obj, off, rc) \
-       (vm_external_state_get((obj)->existence_map, \
-        (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
-        ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
-#else
-#define EXISTS_IN_OBJECT(obj, off, rc) \
-       (((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+       ((vm_external_state_get((obj)->existence_map,   \
+                               (vm_offset_t)(off))     \
+         == VM_EXTERNAL_STATE_EXISTS) ||               \
+        (VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))   \
+         == VM_EXTERNAL_STATE_EXISTS) ||               \
+        ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+#else  /* MACH_PAGEMAP */
+#define EXISTS_IN_OBJECT(obj, off, rc)                 \
+       ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))   \
+         == VM_EXTERNAL_STATE_EXISTS) ||               \
+        ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
 #endif /* MACH_PAGEMAP */
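Note: rewritten as a function, the updated EXISTS_IN_OBJECT() test asks, in order: does the existence map say the page exists (MACH_PAGEMAP builds only), does the compressor pager hold a slot for the offset, or is there a resident page; the resident-count argument is consumed only on a resident-page hit, so the callers can stop probing once every resident page has been accounted for. A sketch under those assumptions, with the three probes reduced to hypothetical stubs:

    #include <stdbool.h>

    /* Hypothetical stand-ins for the three probes the macro chains together. */
    static bool existence_map_says_exists(unsigned long long off) { (void)off; return false; }
    static bool compressor_has_slot(unsigned long long off)       { (void)off; return false; }
    static bool resident_page_found(unsigned long long off)       { (void)off; return true;  }

    /* Function form of EXISTS_IN_OBJECT(obj, off, rc): any backing store counts,
     * and 'rc' (still-unseen resident pages) is decremented only when the
     * answer came from an actual resident page. */
    static bool exists_in_object(unsigned long long off, unsigned int *rc)
    {
            if (existence_map_says_exists(off))     /* MACH_PAGEMAP builds only */
                    return true;
            if (compressor_has_slot(off))
                    return true;
            if (*rc != 0 && resident_page_found(off)) {
                    (*rc)--;                        /* one fewer resident page to find */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            unsigned int rc = 2;
            bool a = exists_in_object(0x0000, &rc);   /* resident hit, rc -> 1 */
            bool b = exists_in_object(0x1000, &rc);   /* resident hit, rc -> 0 */
            bool c = exists_in_object(0x2000, &rc);   /* rc exhausted -> false */
            return (a && b && !c) ? 0 : 1;
    }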
 
                        /*
@@ -4859,37 +5374,24 @@ retry:
                         * pages in the backing object, it makes sense to
                         * walk the backing_object's resident pages first.
                         *
-                        * NOTE: Pages may be in both the existence map and 
-                        * resident.  So, we can't permanently decrement
-                        * the rcount here because the second loop may
-                        * find the same pages in the backing object'
-                        * existence map that we found here and we would
-                        * double-decrement the rcount.  We also may or
-                        * may not have found the 
+                        * NOTE: Pages may be in the existence map and/or
+                        * resident, so if we don't find a dependency while
+                        * walking the backing object's resident page list
+                        * directly, and there is an existence map, we'll have
+                        * to run the offset-based 2nd pass.  Because we may
+                        * have to run both passes, we need to be careful
+                        * not to decrement 'rcount' in the 1st pass.
                         */
-                       if (backing_rcount && 
-#if    MACH_PAGEMAP
-                           size > ((backing_object->existence_map) ?
-                            backing_rcount : (backing_rcount >> 1))
-#else
-                           size > (backing_rcount >> 1)
-#endif /* MACH_PAGEMAP */
-                               ) {
+                       if (backing_rcount && backing_rcount < (size / 8)) {
                                unsigned int rc = rcount;
                                vm_page_t p;
 
                                backing_rcount = backing_object->resident_page_count;
                                p = (vm_page_t)queue_first(&backing_object->memq);
                                do {
-                                       /* Until we get more than one lookup lock */
-                                       if (lookups > 256) {
-                                               vm_object_collapse_delays++;
-                                               lookups = 0;
-                                               mutex_pause(0);
-                                       }
-
                                        offset = (p->offset - backing_offset);
-                                       if (offset < object->size &&
+
+                                       if (offset < object->vo_size &&
                                            offset != hint_offset &&
                                            !EXISTS_IN_OBJECT(object, offset, rc)) {
                                                /* found a dependency */
@@ -4923,16 +5425,9 @@ retry:
                                offset = hint_offset;
                                
                                while((offset =
-                                     (offset + PAGE_SIZE_64 < object->size) ?
+                                     (offset + PAGE_SIZE_64 < object->vo_size) ?
                                      (offset + PAGE_SIZE_64) : 0) != hint_offset) {
 
-                                       /* Until we get more than one lookup lock */
-                                       if (lookups > 256) {
-                                               vm_object_collapse_delays++;
-                                               lookups = 0;
-                                               mutex_pause(0);
-                                       }
-
                                        if (EXISTS_IN_OBJECT(backing_object, offset +
                                            backing_offset, backing_rcount) &&
                                            !EXISTS_IN_OBJECT(object, offset, rcount)) {
@@ -5023,7 +5518,7 @@ vm_object_page_remove(
                for (; start < end; start += PAGE_SIZE_64) {
                        p = vm_page_lookup(object, start);
                        if (p != VM_PAGE_NULL) {
-                               assert(!p->cleaning && !p->pageout);
+                               assert(!p->cleaning && !p->pageout && !p->laundry);
                                if (!p->fictitious && p->pmapped)
                                        pmap_disconnect(p->phys_page);
                                VM_PAGE_FREE(p);
@@ -5036,7 +5531,7 @@ vm_object_page_remove(
                while (!queue_end(&object->memq, (queue_entry_t) p)) {
                        next = (vm_page_t) queue_next(&p->listq);
                        if ((start <= p->offset) && (p->offset < end)) {
-                               assert(!p->cleaning && !p->pageout);
+                               assert(!p->cleaning && !p->pageout && !p->laundry);
                                if (!p->fictitious && p->pmapped)
                                        pmap_disconnect(p->phys_page);
                                VM_PAGE_FREE(p);
@@ -5143,7 +5638,7 @@ vm_object_coalesce(
         *      Extend the object if necessary.
         */
        newsize = prev_offset + prev_size + next_size;
-       if (newsize > prev_object->size) {
+       if (newsize > prev_object->vo_size) {
 #if    MACH_PAGEMAP
                /*
                 *      We cannot extend an object that has existence info,
@@ -5156,7 +5651,7 @@ vm_object_coalesce(
                 */
                assert(prev_object->existence_map == VM_EXTERNAL_NULL);
 #endif /* MACH_PAGEMAP */
-               prev_object->size = newsize;
+               prev_object->vo_size = newsize;
        }
 
        vm_object_unlock(prev_object);
@@ -5202,7 +5697,7 @@ vm_object_page_map(
            }
 
            assert((ppnum_t) addr == addr);
-           vm_page_init(m, (ppnum_t) addr);
+           vm_page_init(m, (ppnum_t) addr, FALSE);
            /*
             * private normally requires lock_queues but since we
             * are initializing the page, it's not necessary here
@@ -5216,332 +5711,6 @@ vm_object_page_map(
        }
 }
 
-#include <mach_kdb.h>
-
-#if    MACH_KDB
-#include <ddb/db_output.h>
-#include <vm/vm_print.h>
-
-#define printf kdbprintf
-
-extern boolean_t       vm_object_cached(
-                               vm_object_t object);
-
-extern void            print_bitstring(
-                               char byte);
-
-boolean_t      vm_object_print_pages = FALSE;
-
-void
-print_bitstring(
-       char byte)
-{
-       printf("%c%c%c%c%c%c%c%c",
-              ((byte & (1 << 0)) ? '1' : '0'),
-              ((byte & (1 << 1)) ? '1' : '0'),
-              ((byte & (1 << 2)) ? '1' : '0'),
-              ((byte & (1 << 3)) ? '1' : '0'),
-              ((byte & (1 << 4)) ? '1' : '0'),
-              ((byte & (1 << 5)) ? '1' : '0'),
-              ((byte & (1 << 6)) ? '1' : '0'),
-              ((byte & (1 << 7)) ? '1' : '0'));
-}
-
-boolean_t
-vm_object_cached(
-       __unused register vm_object_t object)
-{
-#if VM_OBJECT_CACHE
-       register vm_object_t o;
-
-       queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) {
-               if (object == o) {
-                       return TRUE;
-               }
-       }
-#endif
-       return FALSE;
-}
-
-#if    MACH_PAGEMAP
-/*
- *     vm_external_print:      [ debug ]
- */
-void
-vm_external_print(
-       vm_external_map_t       emap,
-       vm_object_size_t        size)
-{
-       if (emap == VM_EXTERNAL_NULL) {
-               printf("0  ");
-       } else {
-               vm_object_size_t existence_size = stob(size);
-               printf("{ size=%lld, map=[", (uint64_t) existence_size);
-               if (existence_size > 0) {
-                       print_bitstring(emap[0]);
-               }
-               if (existence_size > 1) {
-                       print_bitstring(emap[1]);
-               }
-               if (existence_size > 2) {
-                       printf("...");
-                       print_bitstring(emap[existence_size-1]);
-               }
-               printf("] }\n");
-       }
-       return;
-}
-#endif /* MACH_PAGEMAP */
-
-int
-vm_follow_object(
-       vm_object_t object)
-{
-       int count = 0;
-       int orig_db_indent = db_indent;
-
-       while (TRUE) {
-               if (object == VM_OBJECT_NULL) {
-                       db_indent = orig_db_indent;
-                       return count;
-               }
-
-               count += 1;
-
-               iprintf("object 0x%x", object);
-               printf(", shadow=0x%x", object->shadow);
-               printf(", copy=0x%x", object->copy);
-               printf(", pager=0x%x", object->pager);
-               printf(", ref=%d\n", object->ref_count);
-
-               db_indent += 2;
-               object = object->shadow;
-       }
-
-}
-
-/*
- *     vm_object_print:        [ debug ]
- */
-void
-vm_object_print(db_expr_t db_addr, __unused boolean_t have_addr,
-               __unused db_expr_t arg_count, __unused char *modif)
-{
-       vm_object_t     object;
-       register vm_page_t p;
-       const char *s;
-
-       register int count;
-
-       object = (vm_object_t) (long) db_addr;
-       if (object == VM_OBJECT_NULL)
-               return;
-
-       iprintf("object 0x%x\n", object);
-
-       db_indent += 2;
-
-       iprintf("size=0x%x", object->size);
-       printf(", memq_hint=%p", object->memq_hint);
-       printf(", ref_count=%d\n", object->ref_count);
-       iprintf("");
-#if    TASK_SWAPPER
-       printf("res_count=%d, ", object->res_count);
-#endif /* TASK_SWAPPER */
-       printf("resident_page_count=%d\n", object->resident_page_count);
-
-       iprintf("shadow=0x%x", object->shadow);
-       if (object->shadow) {
-               register int i = 0;
-               vm_object_t shadow = object;
-               while((shadow = shadow->shadow))
-                       i++;
-               printf(" (depth %d)", i);
-       }
-       printf(", copy=0x%x", object->copy);
-       printf(", shadow_offset=0x%x", object->shadow_offset);
-       printf(", last_alloc=0x%x\n", object->last_alloc);
-
-       iprintf("pager=0x%x", object->pager);
-       printf(", paging_offset=0x%x", object->paging_offset);
-       printf(", pager_control=0x%x\n", object->pager_control);
-
-       iprintf("copy_strategy=%d[", object->copy_strategy);
-       switch (object->copy_strategy) {
-               case MEMORY_OBJECT_COPY_NONE:
-               printf("copy_none");
-               break;
-
-               case MEMORY_OBJECT_COPY_CALL:
-               printf("copy_call");
-               break;
-
-               case MEMORY_OBJECT_COPY_DELAY:
-               printf("copy_delay");
-               break;
-
-               case MEMORY_OBJECT_COPY_SYMMETRIC:
-               printf("copy_symmetric");
-               break;
-
-               case MEMORY_OBJECT_COPY_INVALID:
-               printf("copy_invalid");
-               break;
-
-               default:
-               printf("?");
-       }
-       printf("]");
-
-       iprintf("all_wanted=0x%x<", object->all_wanted);
-       s = "";
-       if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) {
-               printf("%sinit", s);
-               s = ",";
-       }
-       if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) {
-               printf("%sready", s);
-               s = ",";
-       }
-       if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) {
-               printf("%spaging", s);
-               s = ",";
-       }
-       if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) {
-               printf("%slock", s);
-               s = ",";
-       }
-       if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) {
-               printf("%suncaching", s);
-               s = ",";
-       }
-       if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) {
-               printf("%scopy_call", s);
-               s = ",";
-       }
-       if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) {
-               printf("%scaching", s);
-               s = ",";
-       }
-       printf(">");
-       printf(", paging_in_progress=%d\n", object->paging_in_progress);
-       printf(", activity_in_progress=%d\n", object->activity_in_progress);
-
-       iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n",
-               (object->pager_created ? "" : "!"),
-               (object->pager_initialized ? "" : "!"),
-               (object->pager_ready ? "" : "!"),
-               (object->can_persist ? "" : "!"),
-               (object->pager_trusted ? "" : "!"),
-               (object->pageout ? "" : "!"),
-               (object->internal ? "internal" : "external"),
-               (object->temporary ? "temporary" : "permanent"));
-       iprintf("%salive, %spurgeable, %spurgeable_volatile, %spurgeable_empty, %sshadowed, %scached, %sprivate\n",
-               (object->alive ? "" : "!"),
-               ((object->purgable != VM_PURGABLE_DENY) ? "" : "!"),
-               ((object->purgable == VM_PURGABLE_VOLATILE) ? "" : "!"),
-               ((object->purgable == VM_PURGABLE_EMPTY) ? "" : "!"),
-               (object->shadowed ? "" : "!"),
-               (vm_object_cached(object) ? "" : "!"),
-               (object->private ? "" : "!"));
-       iprintf("%sadvisory_pageout, %ssilent_overwrite\n",
-               (object->advisory_pageout ? "" : "!"),
-               (object->silent_overwrite ? "" : "!"));
-
-#if    MACH_PAGEMAP
-       iprintf("existence_map=");
-       vm_external_print(object->existence_map, object->size);
-#endif /* MACH_PAGEMAP */
-#if    MACH_ASSERT
-       iprintf("paging_object=0x%x\n", object->paging_object);
-#endif /* MACH_ASSERT */
-
-       if (vm_object_print_pages) {
-               count = 0;
-               p = (vm_page_t) queue_first(&object->memq);
-               while (!queue_end(&object->memq, (queue_entry_t) p)) {
-                       if (count == 0) {
-                               iprintf("memory:=");
-                       } else if (count == 2) {
-                               printf("\n");
-                               iprintf(" ...");
-                               count = 0;
-                       } else {
-                               printf(",");
-                       }
-                       count++;
-
-                       printf("(off=0x%llX,page=%p)", p->offset, p);
-                       p = (vm_page_t) queue_next(&p->listq);
-               }
-               if (count != 0) {
-                       printf("\n");
-               }
-       }
-       db_indent -= 2;
-}
-
-
-/*
- *     vm_object_find          [ debug ]
- *
- *     Find all tasks which reference the given vm_object.
- */
-
-boolean_t vm_object_find(vm_object_t object);
-boolean_t vm_object_print_verbose = FALSE;
-
-boolean_t
-vm_object_find(
-       vm_object_t     object)
-{
-        task_t task;
-       vm_map_t map;
-       vm_map_entry_t entry;
-       boolean_t found = FALSE;
-
-       queue_iterate(&tasks, task, task_t, tasks) {
-               map = task->map;
-               for (entry = vm_map_first_entry(map);
-                        entry && entry != vm_map_to_entry(map);
-                        entry = entry->vme_next) {
-
-                       vm_object_t obj;
-
-                       /* 
-                        * For the time being skip submaps,
-                        * only the kernel can have submaps,
-                        * and unless we are interested in 
-                        * kernel objects, we can simply skip 
-                        * submaps. See sb/dejan/nmk18b7/src/mach_kernel/vm
-                        * for a full solution.
-                        */
-                       if (entry->is_sub_map)
-                               continue;
-                       if (entry) 
-                               obj = entry->object.vm_object;
-                       else 
-                               continue;
-
-                       while (obj != VM_OBJECT_NULL) {
-                               if (obj == object) {
-                                       if (!found) {
-                                               printf("TASK\t\tMAP\t\tENTRY\n");
-                                               found = TRUE;
-                                       }
-                                       printf("0x%x\t0x%x\t0x%x\n", 
-                                                  task, map, entry);
-                               }
-                               obj = obj->shadow;
-                       }
-               }
-       }
-
-       return(found);
-}
-
-#endif /* MACH_KDB */
-
 kern_return_t
 vm_object_populate_with_private(
                vm_object_t             object,
@@ -5553,23 +5722,27 @@ vm_object_populate_with_private(
        vm_object_offset_t      base_offset;
 
 
-       if(!object->private)
+       if (!object->private)
                return KERN_FAILURE;
 
        base_page = phys_page;
 
        vm_object_lock(object);
-       if(!object->phys_contiguous) {
+
+       if (!object->phys_contiguous) {
                vm_page_t       m;
-               if((base_offset = trunc_page_64(offset)) != offset) {
+
+               if ((base_offset = trunc_page_64(offset)) != offset) {
                        vm_object_unlock(object);
                        return KERN_FAILURE;
                }
                base_offset += object->paging_offset;
-               while(size) {
+
+               while (size) {
                        m = vm_page_lookup(object, base_offset);
-                       if(m != VM_PAGE_NULL) {
-                               if(m->fictitious) {
+
+                       if (m != VM_PAGE_NULL) {
+                               if (m->fictitious) {
                                        if (m->phys_page != vm_page_guard_addr) {
 
                                                vm_page_lockspin_queues();
@@ -5578,16 +5751,16 @@ vm_object_populate_with_private(
 
                                                m->fictitious = FALSE;
                                                m->phys_page = base_page;
-                                               if(!m->busy) {
-                                                       m->busy = TRUE;
-                                               }
-                                               if(!m->absent) {
-                                                       m->absent = TRUE;
-                                               }
-                                               m->list_req_pending = TRUE;
                                        }
                                } else if (m->phys_page != base_page) {
-                                       if (m->pmapped) {
+
+                                       if ( !m->private) {
+                                               /*
+                                                * we'd leak a real page... that can't be right
+                                                */
+                                               panic("vm_object_populate_with_private - %p not private", m);
+                                       }
+                                       if (m->pmapped) {
                                                /*
                                                 * pmap call to clear old mapping
                                                 */
@@ -5595,17 +5768,12 @@ vm_object_populate_with_private(
                                        }
                                        m->phys_page = base_page;
                                }
-
-                               /*
-                                * ENCRYPTED SWAP:
-                                * We're not pointing to the same
-                                * physical page any longer and the
-                                * contents of the new one are not
-                                * supposed to be encrypted.
-                                * XXX What happens to the original
-                                * physical page. Is it lost ?
-                                */
-                               m->encrypted = FALSE;
+                               if (m->encrypted) {
+                                       /*
+                                        * we should never see this on a fictitious or private page
+                                        */
+                                       panic("vm_object_populate_with_private - %p encrypted", m);
+                               }
 
                        } else {
                                while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
@@ -5618,9 +5786,8 @@ vm_object_populate_with_private(
                                m->private = TRUE;
                                m->fictitious = FALSE;
                                m->phys_page = base_page;
-                               m->list_req_pending = TRUE;
-                               m->absent = TRUE;
                                m->unusual = TRUE;
+                               m->busy = FALSE;
 
                                vm_page_insert(m, object, base_offset);
                        }
@@ -5637,10 +5804,11 @@ vm_object_populate_with_private(
                
                /* shadows on contiguous memory are not allowed */
                /* we therefore can use the offset field */
-               object->shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
-               object->size = size;
+               object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
+               object->vo_size = size;
        }
        vm_object_unlock(object);
+
        return KERN_SUCCESS;
 }
 
@@ -6198,7 +6366,9 @@ vm_object_purgable_control(
                        purgeable_q_t queue = vm_purgeable_object_remove(object);
                        assert(queue);
 
-                       vm_purgeable_token_delete_first(queue);
+                       if (object->purgeable_when_ripe) {
+                               vm_purgeable_token_delete_last(queue);
+                       }
                        assert(queue->debug_count_objects>=0);
 
                        vm_page_unlock_queues();
@@ -6219,7 +6389,7 @@ vm_object_purgable_control(
                                refmod = pmap_disconnect(p->phys_page);
                                if ((refmod & VM_MEM_MODIFIED) &&
                                    !p->dirty) {
-                                       p->dirty = TRUE;
+                                       SET_PAGE_DIRTY(p, FALSE);
                                }
                        }
                }
@@ -6244,15 +6414,26 @@ vm_object_purgable_control(
                    old_state == VM_PURGABLE_EMPTY) {
                        unsigned int delta;
 
-                       /* try to add token... this can fail */
-                       vm_page_lock_queues();
+                       if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
+                           VM_PURGABLE_NO_AGING) {
+                               object->purgeable_when_ripe = FALSE;
+                       } else {
+                               object->purgeable_when_ripe = TRUE;
+                       }
+                               
+                       if (object->purgeable_when_ripe) {
+                               kern_return_t result;
+
+                               /* try to add token... this can fail */
+                               vm_page_lock_queues();
 
-                       kern_return_t result = vm_purgeable_token_add(queue);
-                       if (result != KERN_SUCCESS) {
-                               vm_page_unlock_queues();
-                               return result;
+                               result = vm_purgeable_token_add(queue);
+                               if (result != KERN_SUCCESS) {
+                                       vm_page_unlock_queues();
+                                       return result;
+                               }
+                               vm_page_unlock_queues();
                        }
-                       vm_page_unlock_queues();
 
                        assert(object->resident_page_count >=
                               object->wired_page_count);
@@ -6274,6 +6455,9 @@ vm_object_purgable_control(
                        assert(object->objq.next == NULL && object->objq.prev == NULL);
                }
                else if (old_state == VM_PURGABLE_VOLATILE) {
+                       purgeable_q_t   old_queue;
+                       boolean_t       purgeable_when_ripe;
+
                        /*
                         * if reassigning priorities / purgeable groups, we don't change the
                         * token queue. So moving priorities will not make pages stay around longer.
@@ -6284,19 +6468,33 @@ vm_object_purgable_control(
                         */
                        assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
             
-                       purgeable_q_t old_queue=vm_purgeable_object_remove(object);
+                       old_queue = vm_purgeable_object_remove(object);
                        assert(old_queue);
             
-                       if (old_queue != queue) {
+                       if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
+                           VM_PURGABLE_NO_AGING) {
+                               purgeable_when_ripe = FALSE;
+                       } else {
+                               purgeable_when_ripe = TRUE;
+                       }
+                               
+                       if (old_queue != queue ||
+                           (purgeable_when_ripe !=
+                            object->purgeable_when_ripe)) {
                                kern_return_t result;
 
                                /* Changing queue. Have to move token. */
                                vm_page_lock_queues();
-                               vm_purgeable_token_delete_first(old_queue);
-                               result = vm_purgeable_token_add(queue);
+                               if (object->purgeable_when_ripe) {
+                                       vm_purgeable_token_delete_last(old_queue);
+                               }
+                               object->purgeable_when_ripe = purgeable_when_ripe;
+                               if (object->purgeable_when_ripe) {
+                                       result = vm_purgeable_token_add(queue);
+                                       assert(result==KERN_SUCCESS);   /* this should never fail since we just freed a token */
+                               }
                                vm_page_unlock_queues();
 
-                               assert(result==KERN_SUCCESS);   /* this should never fail since we just freed a token */
                        }
                };
                vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
@@ -6320,7 +6518,7 @@ vm_object_purgable_control(
                                refmod = pmap_disconnect(p->phys_page);
                                if ((refmod & VM_MEM_MODIFIED) &&
                                    !p->dirty) {
-                                       p->dirty = TRUE;
+                                       SET_PAGE_DIRTY(p, FALSE);
                                }
                        }
                }
@@ -6336,9 +6534,11 @@ vm_object_purgable_control(
                                       object->objq.prev != NULL);
                                old_queue = vm_purgeable_object_remove(object);
                                assert(old_queue);
-                               vm_page_lock_queues();
-                               vm_purgeable_token_delete_first(old_queue);
-                               vm_page_unlock_queues();
+                               if (object->purgeable_when_ripe) {
+                                       vm_page_lock_queues();
+                                       vm_purgeable_token_delete_first(old_queue);
+                                       vm_page_unlock_queues();
+                               }
                        }
                        (void) vm_object_purge(object);
                }
@@ -6350,6 +6550,103 @@ vm_object_purgable_control(
        return KERN_SUCCESS;
 }
 
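+/*
+ *     vm_object_get_page_counts:
+ *
+ *     Report how many pages of "object" are resident in the range
+ *     [offset, offset + size) and, if "dirty_page_count" is non-NULL,
+ *     how many of those are dirty.
+ *
+ *     The object must be locked exclusively by the caller.
+ */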
+kern_return_t
+vm_object_get_page_counts(
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size,
+       unsigned int            *resident_page_count,
+       unsigned int            *dirty_page_count)
+{
+
+       kern_return_t           kr = KERN_SUCCESS;
+       boolean_t               count_dirty_pages = FALSE;
+       vm_page_t               p = VM_PAGE_NULL;
+       unsigned int            local_resident_count = 0;
+       unsigned int            local_dirty_count = 0;
+       vm_object_offset_t      cur_offset = 0;
+       vm_object_offset_t      end_offset = 0;
+
+       if (object == VM_OBJECT_NULL)
+               return KERN_INVALID_ARGUMENT;
+
+
+       cur_offset = offset;
+       
+       end_offset = offset + size;
+
+       vm_object_lock_assert_exclusive(object);
+
+       if (dirty_page_count != NULL) {
+
+               count_dirty_pages = TRUE;
+       }
+
+       if (resident_page_count != NULL && count_dirty_pages == FALSE) {
+               /*
+                * Fast path when:
+                * - we only want the resident page count, and,
+                * - the entire object is exactly covered by the request.
+                */
+               if (offset == 0 && (object->vo_size == size)) {
+
+                       *resident_page_count = object->resident_page_count;
+                       goto out;
+               }
+       }
+
+       if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
+
+               queue_iterate(&object->memq, p, vm_page_t, listq) {
+               
+                       if (p->offset >= cur_offset && p->offset < end_offset) {
+
+                               local_resident_count++;
+
+                               if (count_dirty_pages) {
+                                       
+                                       if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) {
+                                               
+                                               local_dirty_count++;
+                                       }
+                               }
+                       }
+               }
+       } else {
+
+               for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
+       
+                       p = vm_page_lookup(object, cur_offset);
+               
+                       if (p != VM_PAGE_NULL) {
+
+                               local_resident_count++;
+
+                               if (count_dirty_pages) {
+                                       
+                                       if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) {
+                               
+                                               local_dirty_count++;
+                                       }
+                               }
+                       }
+               }
+
+       }
+
+       if (resident_page_count != NULL) {
+               *resident_page_count = local_resident_count;
+       }
+
+       if (dirty_page_count != NULL) {
+               *dirty_page_count = local_dirty_count;
+       }
+
+out:
+       return kr;
+}
+
+
 #if    TASK_SWAPPER
 /*
  * vm_object_res_deallocate
@@ -6580,8 +6877,8 @@ vm_object_transpose(
        vm_object_paging_only_wait(object2, THREAD_UNINT);
 
 
-       if (object1->size != object2->size ||
-           object1->size != transpose_size) {
+       if (object1->vo_size != object2->vo_size ||
+           object1->vo_size != transpose_size) {
                /*
                 * If the 2 objects don't have the same size, we can't
                 * exchange their backing stores or one would overflow.
@@ -6656,7 +6953,7 @@ MACRO_END
 
        /* "Lock" refers to the object not its contents */
        /* "size" should be identical */
-       assert(object1->size == object2->size);
+       assert(object1->vo_size == object2->vo_size);
        /* "memq_hint" was updated above when transposing pages */
        /* "ref_count" refers to the object not its contents */
 #if TASK_SWAPPER
@@ -6671,7 +6968,7 @@ MACRO_END
        /* there should be no "shadow" */
        assert(!object1->shadow);
        assert(!object2->shadow);
-       __TRANSPOSE_FIELD(shadow_offset); /* used by phys_contiguous objects */
+       __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
        __TRANSPOSE_FIELD(pager);
        __TRANSPOSE_FIELD(paging_offset);
        __TRANSPOSE_FIELD(pager_control);
@@ -6707,7 +7004,7 @@ MACRO_END
        assert(object1->purgable == VM_PURGABLE_DENY);
        assert(object2->purgable == VM_PURGABLE_DENY);
        /* "shadowed" refers to the object not its contents */
-       __TRANSPOSE_FIELD(silent_overwrite);
+       __TRANSPOSE_FIELD(purgeable_when_ripe);
        __TRANSPOSE_FIELD(advisory_pageout);
        __TRANSPOSE_FIELD(true_share);
        /* "terminating" should not be set */
@@ -6730,6 +7027,7 @@ MACRO_END
        __TRANSPOSE_FIELD(sequential);
        __TRANSPOSE_FIELD(pages_created);
        __TRANSPOSE_FIELD(pages_used);
+       __TRANSPOSE_FIELD(scan_collisions);
 #if MACH_PAGEMAP
        __TRANSPOSE_FIELD(existence_map);
 #endif
@@ -6738,6 +7036,7 @@ MACRO_END
        __TRANSPOSE_FIELD(paging_object);
 #endif
        __TRANSPOSE_FIELD(wimg_bits);
+       __TRANSPOSE_FIELD(set_cache_attr);
        __TRANSPOSE_FIELD(code_signed);
        if (object1->hashed) {
                hash_lck = vm_object_hash_lock_spin(object2->pager);
@@ -6820,15 +7119,10 @@ done:
  *
  */
 extern int speculative_reads_disabled;
-#if CONFIG_EMBEDDED
-unsigned int preheat_pages_max = MAX_UPL_TRANSFER;
-unsigned int preheat_pages_min = 8;
-unsigned int preheat_pages_mult = 4;
-#else
+extern int ignore_is_ssd;
+
 unsigned int preheat_pages_max = MAX_UPL_TRANSFER;
 unsigned int preheat_pages_min = 8;
-unsigned int preheat_pages_mult = 4;
-#endif
 
 uint32_t pre_heat_scaling[MAX_UPL_TRANSFER + 1];
 uint32_t pre_heat_cluster[MAX_UPL_TRANSFER + 1];
@@ -6850,26 +7144,21 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
        vm_behavior_t           behavior;
        boolean_t               look_behind = TRUE;
        boolean_t               look_ahead  = TRUE;
+       boolean_t               isSSD = FALSE;
        uint32_t                throttle_limit;
        int                     sequential_run;
        int                     sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
        unsigned int            max_ph_size;
        unsigned int            min_ph_size;
-       unsigned int            ph_mult;
+       unsigned int            min_ph_size_in_bytes;
 
        assert( !(*length & PAGE_MASK));
        assert( !(*start & PAGE_MASK_64));
 
-       if ( (ph_mult = preheat_pages_mult) < 1 ) 
-               ph_mult = 1;
-       if ( (min_ph_size = preheat_pages_min) < 1 ) 
-               min_ph_size = 1;
-       if ( (max_ph_size = preheat_pages_max) > MAX_UPL_TRANSFER ) 
-               max_ph_size = MAX_UPL_TRANSFER;
-       
-       if ( (max_length = *length) > (max_ph_size * PAGE_SIZE) ) 
-               max_length = (max_ph_size * PAGE_SIZE);
-
+       /*
+        * remember maximum length of run requested
+        */
+       max_length = *length;
        /*
         * we'll always return a cluster size of at least
         * 1 page, since the original fault must always
@@ -6878,7 +7167,7 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
        *length = PAGE_SIZE;
        *io_streaming = 0;
 
-       if (speculative_reads_disabled || fault_info == NULL || max_length == 0) {
+       if (speculative_reads_disabled || fault_info == NULL) {
                /*
                 * no cluster... just fault the page in
                 */
@@ -6891,12 +7180,39 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
 
        vm_object_lock(object);
 
+       if (object->pager == MEMORY_OBJECT_NULL)
+               goto out;       /* pager is gone for this object, nothing more to do */
+
+       if (!ignore_is_ssd)
+               vnode_pager_get_isSSD(object->pager, &isSSD);
+
+       min_ph_size = preheat_pages_min;
+       max_ph_size = preheat_pages_max;
+
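+       /*
+        * use a smaller preheat window when the backing store is an SSD,
+        * since large speculative clusters buy little when seeks are cheap
+        */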
+       if (isSSD) {
+               min_ph_size /= 2;
+               max_ph_size /= 8;
+       }
+       if (min_ph_size < 1)
+               min_ph_size = 1;
+
+       if (max_ph_size < 1)
+               max_ph_size = 1;
+       else if (max_ph_size > MAX_UPL_TRANSFER)
+               max_ph_size = MAX_UPL_TRANSFER;
+
+       if (max_length > (max_ph_size * PAGE_SIZE)) 
+               max_length = max_ph_size * PAGE_SIZE;
+
+       if (max_length <= PAGE_SIZE)
+               goto out;
+
+       min_ph_size_in_bytes = min_ph_size * PAGE_SIZE;
+
        if (object->internal)
-               object_size = object->size;
-       else if (object->pager != MEMORY_OBJECT_NULL)
-               vnode_pager_get_object_size(object->pager, &object_size);
+               object_size = object->vo_size;
        else
-               goto out;       /* pager is gone for this object, nothing more to do */
+               vnode_pager_get_object_size(object->pager, &object_size);
 
        object_size = round_page_64(object_size);
 
@@ -6924,7 +7240,7 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                  }
 
        }
-       switch(behavior) {
+       switch (behavior) {
 
        default:
                behavior = VM_BEHAVIOR_DEFAULT;
@@ -6944,25 +7260,25 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                        *io_streaming = 1;
                } else {
 
-                       if (object->pages_created < 32 * ph_mult) {
+                       if (object->pages_created < (20 * min_ph_size)) {
                                /*
                                 * prime the pump
                                 */
-                               pre_heat_size = PAGE_SIZE * 8 * ph_mult;
-                               break;
+                               pre_heat_size = min_ph_size_in_bytes;
+                       } else {
+                               /*
+                                * Linear growth in PH size: The maximum size is max_length...
+                                * this calculation will result in a size that is neither a
+                                * power of 2 nor a multiple of PAGE_SIZE... so round
+                                * it up to the nearest PAGE_SIZE boundary
+                                */
+                               pre_heat_size = (max_length * object->pages_used) / object->pages_created;
+                               
+                               if (pre_heat_size < min_ph_size_in_bytes)
+                                       pre_heat_size = min_ph_size_in_bytes;
+                               else
+                                       pre_heat_size = round_page(pre_heat_size);
                        }
-                       /*
-                        * Linear growth in PH size: The maximum size is max_length...
-                        * this cacluation will result in a size that is neither a 
-                        * power of 2 nor a multiple of PAGE_SIZE... so round
-                        * it up to the nearest PAGE_SIZE boundary
-                        */
-                       pre_heat_size = (ph_mult * (max_length * object->pages_used) / object->pages_created);
-                       
-                       if (pre_heat_size < PAGE_SIZE * min_ph_size)
-                               pre_heat_size = PAGE_SIZE * min_ph_size;
-                       else
-                               pre_heat_size = round_page(pre_heat_size);
                }
                break;
 
@@ -6991,21 +7307,25 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
        throttle_limit = (uint32_t) max_length;
        assert(throttle_limit == max_length);
 
-       if (vnode_pager_check_hard_throttle(object->pager, &throttle_limit, *io_streaming) == KERN_SUCCESS) {
+       if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
                if (max_length > throttle_limit)
                        max_length = throttle_limit;
        }
        if (pre_heat_size > max_length)
                pre_heat_size = max_length;
 
-       if (behavior == VM_BEHAVIOR_DEFAULT) {
-               if (vm_page_free_count < vm_page_throttle_limit)
-                       pre_heat_size = trunc_page(pre_heat_size / 8);
-               else if (vm_page_free_count < vm_page_free_target)
-                       pre_heat_size = trunc_page(pre_heat_size / 2);
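+       /*
+        * under memory pressure, scale the speculative cluster back
+        * sharply, but never below the minimum preheat size
+        */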
+       if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size_in_bytes)) {
 
-               if (pre_heat_size <= PAGE_SIZE)
-                       goto out;
+               unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
+               
+               if (consider_free < vm_page_throttle_limit) {
+                       pre_heat_size = trunc_page(pre_heat_size / 16);
+               } else if (consider_free < vm_page_free_target) {
+                       pre_heat_size = trunc_page(pre_heat_size / 4);
+               }
+               
+               if (pre_heat_size < min_ph_size_in_bytes)
+                       pre_heat_size = min_ph_size_in_bytes;
        }
        if (look_ahead == TRUE) {
                if (look_behind == TRUE) { 
@@ -7042,8 +7362,14 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
                tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
        } else {
-               if (pre_heat_size > target_start)
-                       pre_heat_size = (vm_size_t) target_start; /* XXX: 32-bit vs 64-bit ? Joe ? */
+               if (pre_heat_size > target_start) {
+                       /*
+                        * since pre_heat_size is always smaller than 2^32,
+                        * if it is larger than target_start (a 64-bit value)
+                        * it is safe to clip target_start to 32 bits
+                        */
+                       pre_heat_size = (vm_size_t) target_start;
+               }
                tail_size = 0;
        }
        assert( !(target_start & PAGE_MASK_64));
@@ -7081,7 +7407,11 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                                 */
                                break;
                        }
-#endif
+#endif /* MACH_PAGEMAP */
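+                       /*
+                        * don't bridge over pages the compressor pager
+                        * knows are absent
+                        */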
+                       if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
+                           == VM_EXTERNAL_STATE_ABSENT) {
+                               break;
+                       }
                        if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
                                /*
                                 * don't bridge resident pages
@@ -7113,7 +7443,11 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                                 */
                                break;
                        }
-#endif
+#endif /* MACH_PAGEMAP */
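+                       /*
+                        * don't bridge over pages the compressor pager
+                        * knows are absent
+                        */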
+                       if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
+                           == VM_EXTERNAL_STATE_ABSENT) {
+                               break;
+                       }
                        if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
                                /*
                                 * don't bridge resident pages
@@ -7130,6 +7464,8 @@ out:
        pre_heat_cluster[*length / PAGE_SIZE]++;
 
        vm_object_unlock(object);
+       
+       DTRACE_VM1(clustersize, vm_size_t, *length);
 }
 
 
@@ -7154,7 +7490,7 @@ vm_object_page_op(
                if(object->phys_contiguous) {
                        if (phys_entry) {
                                *phys_entry = (ppnum_t)
-                                       (object->shadow_offset >> PAGE_SHIFT);
+                                       (object->vo_shadow_offset >> PAGE_SHIFT);
                        }
                        vm_object_unlock(object);
                        return KERN_SUCCESS;
@@ -7216,7 +7552,9 @@ vm_object_page_op(
                        /* if such violations occur we will assert sooner */
                        /* or later. */
                        assert(dst_page->busy || (ops & UPL_POP_BUSY));
-                       if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
+                       if (ops & UPL_POP_DIRTY) {
+                               SET_PAGE_DIRTY(dst_page, FALSE);
+                       }
                        if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
                        if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
                        if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
@@ -7336,7 +7674,7 @@ vm_object_range_op(
                if (dst_page != VM_PAGE_NULL) {
                        if (ops & UPL_ROP_DUMP) {
                                if (dst_page->busy || dst_page->cleaning) {
-                                       /*
+                                       /*
                                         * someone else is playing with the 
                                         * page, we will have to wait
                                         */
@@ -7349,6 +7687,11 @@ vm_object_range_op(
                                         */
                                        continue;
                                }
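+                               /*
+                                * the page is being laundered... steal it
+                                * back from the pageout queues before
+                                * dumping it
+                                */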
+                               if (dst_page->laundry) {
+                                       dst_page->pageout = FALSE;
+                                       
+                                       vm_pageout_steal_laundry(dst_page, FALSE);
+                               }
                                if (dst_page->pmapped == TRUE)
                                        pmap_disconnect(dst_page->phys_page);
 
@@ -7376,6 +7719,46 @@ vm_object_range_op(
        return KERN_SUCCESS;
 }
 
+/*
+ * Used to point a pager directly to a range of memory (when the pager may be associated
+ *   with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
+ *   expect that the virtual address will denote the start of a range that is physically contiguous.
+ */
+kern_return_t pager_map_to_phys_contiguous(
+       memory_object_control_t object,
+       memory_object_offset_t  offset,
+       addr64_t                base_vaddr,
+       vm_size_t               size)
+{
+       ppnum_t page_num;
+       boolean_t clobbered_private;
+       kern_return_t retval;
+       vm_object_t pager_object;
+
+       page_num = pmap_find_phys(kernel_pmap, base_vaddr);
+
+       if (!page_num) {
+               retval = KERN_FAILURE;
+               goto out;
+       }
+
+       pager_object = memory_object_control_to_vm_object(object);
+
+       if (!pager_object) {
+               retval = KERN_FAILURE;
+               goto out;
+       }
+
+       clobbered_private = pager_object->private;
+       pager_object->private = TRUE;
+       retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
+
+       if (retval != KERN_SUCCESS)
+               pager_object->private = clobbered_private;
+
+out:
+       return retval;
+}
 
 uint32_t scan_object_collision = 0;
 
@@ -7408,11 +7791,15 @@ _vm_object_lock_try(vm_object_t object)
 boolean_t
 vm_object_lock_try(vm_object_t object)
 {
-       if (vm_object_lock_avoid(object)) {
+       /*
+        * Called from hibernate path so check before blocking.
+        */
+       if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level()==0) {
                mutex_pause(2);
        }
        return _vm_object_lock_try(object);
 }
+
 void
 vm_object_lock_shared(vm_object_t object)
 {
@@ -7430,3 +7817,289 @@ vm_object_lock_try_shared(vm_object_t object)
        }
        return (lck_rw_try_lock_shared(&object->Lock));
 }
+
+
+unsigned int vm_object_change_wimg_mode_count = 0;
+
+/*
+ * The object must be locked
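+ *
+ * Re-tag each resident page's physical mapping with the new WIMG
+ * (cacheability) mode and record the mode on the object via
+ * "wimg_bits" and "set_cache_attr".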
+ */
+void
+vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
+{
+       vm_page_t p;
+
+       vm_object_lock_assert_exclusive(object);
+
+       vm_object_paging_wait(object, THREAD_UNINT);
+
+       queue_iterate(&object->memq, p, vm_page_t, listq) {
+
+               if (!p->fictitious)
+                       pmap_set_cache_attributes(p->phys_page, wimg_mode);
+       }
+       if (wimg_mode == VM_WIMG_USE_DEFAULT)
+               object->set_cache_attr = FALSE;
+       else
+               object->set_cache_attr = TRUE;
+
+       object->wimg_bits = wimg_mode;
+
+       vm_object_change_wimg_mode_count++;
+}
+
+#if CONFIG_FREEZE
+
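+/*
+ *     vm_object_pack:
+ *
+ *     Evaluate "src_object" on behalf of the freezer.  A volatile object is
+ *     reported via "purgeable_count" and purged outright when "df_handle"
+ *     is non-NULL; otherwise, if the object is not shared, its pages are
+ *     counted and packed by vm_object_pack_pages().
+ */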
+kern_return_t vm_object_pack(
+       unsigned int    *purgeable_count,
+       unsigned int    *wired_count,
+       unsigned int    *clean_count,
+       unsigned int    *dirty_count,
+       unsigned int    dirty_budget,
+       boolean_t       *shared,
+       vm_object_t     src_object,
+       struct default_freezer_handle *df_handle)
+{
+       kern_return_t   kr = KERN_SUCCESS;
+       
+       vm_object_lock(src_object);
+
+       *purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
+       *shared = FALSE;
+
+       if (!src_object->alive || src_object->terminating){
+               kr = KERN_FAILURE;
+               goto done;
+       }
+
+       if (src_object->purgable == VM_PURGABLE_VOLATILE) {
+               *purgeable_count = src_object->resident_page_count;
+               
+               /* If the default freezer handle is null, we're just walking the pages to discover how many can be hibernated */
+               if (df_handle != NULL) {
+                       purgeable_q_t queue;
+                       /* object should be on a queue */
+                       assert(src_object->objq.next != NULL &&
+                              src_object->objq.prev != NULL);
+                       queue = vm_purgeable_object_remove(src_object);
+                       assert(queue);
+                       if (src_object->purgeable_when_ripe) {
+                               vm_page_lock_queues();
+                               vm_purgeable_token_delete_first(queue);
+                               vm_page_unlock_queues();
+                       }
+                       vm_object_purge(src_object);
+               }
+               goto done;
+       }
+
+       if (src_object->ref_count == 1) {
+               vm_object_pack_pages(wired_count, clean_count, dirty_count, dirty_budget, src_object, df_handle);
+       } else {
+               if (src_object->internal) {
+                       *shared = TRUE;
+               }
+       }
+done:
+       vm_object_unlock(src_object);
+       
+       return kr;
+}
+
+
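+/*
+ *     vm_object_pack_pages:
+ *
+ *     Walk "src_object", counting wired, clean and dirty pages.  When a
+ *     freezer handle is supplied, dirty pages are packed into it and clean
+ *     pages are freed, honoring the "dirty_budget" limit on dirty pages.
+ */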
+void
+vm_object_pack_pages(
+       unsigned int            *wired_count,
+       unsigned int            *clean_count,
+       unsigned int            *dirty_count,
+       unsigned int            dirty_budget,
+       vm_object_t             src_object,
+       struct default_freezer_handle *df_handle)
+{
+       vm_page_t p, next;
+
+       next = (vm_page_t)queue_first(&src_object->memq);
+
+       while (!queue_end(&src_object->memq, (queue_entry_t)next)) {
+               p = next;
+               next = (vm_page_t)queue_next(&next->listq);
+               
+               /* Finish up if we've hit our pageout limit */
+               if (dirty_budget && (dirty_budget == *dirty_count)) {
+                       break;
+               }
+               assert(!p->laundry);
+
+               if (p->fictitious || p->busy ) 
+                       continue;
+               
+               if (p->absent || p->unusual || p->error)
+                       continue;
+               
+               if (VM_PAGE_WIRED(p)) {
+                       (*wired_count)++;
+                       continue;
+               }
+               
+               if (df_handle == NULL) {
+                       if (p->dirty || pmap_is_modified(p->phys_page)) {
+                               (*dirty_count)++;
+                       } else {
+                               (*clean_count)++;                               
+                       }
+                       continue;
+               }
+               
+               if (p->cleaning) {
+                       p->pageout = TRUE;
+                       continue;
+               }
+
+               if (p->pmapped == TRUE) {
+                       int refmod_state;
+                       refmod_state = pmap_disconnect(p->phys_page);
+                       if (refmod_state & VM_MEM_MODIFIED) {
+                               SET_PAGE_DIRTY(p, FALSE);
+                       }
+               }
+               
+               if (p->dirty) {
+                       default_freezer_pack_page(p, df_handle);        
+                       (*dirty_count)++;
+               }
+               else {
+                       VM_PAGE_FREE(p);
+                       (*clean_count)++;
+               }
+       }
+}
+
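+/*
+ *     vm_object_pageout:
+ *
+ *     Push all of "object"'s resident pages to the pageout queue.  With a
+ *     compressed pager active, clean pages are freed immediately and dirty
+ *     pages are clustered for compression, throttling against the internal
+ *     pageout queue when it fills up.
+ */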
+void
+vm_object_pageout(
+       vm_object_t object)
+{
+       vm_page_t                       p, next;
+       struct  vm_pageout_queue        *iq;
+
+       iq = &vm_pageout_queue_internal;
+       
+       assert(object != VM_OBJECT_NULL );
+       
+       vm_object_lock(object);
+
+       if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) {
+               if (!object->pager_initialized) {
+                       /*
+                        *   If there is no memory object for the page, create
+                        *   one and hand it to the default pager.
+                        */
+                       vm_object_pager_create(object);
+               }
+       }
+
+ReScan:        
+       next = (vm_page_t)queue_first(&object->memq);
+
+       while (!queue_end(&object->memq, (queue_entry_t)next)) {
+               p = next;
+               next = (vm_page_t)queue_next(&next->listq);
+               
+               /* Throw to the pageout queue */
+               vm_page_lockspin_queues();
+
+               /*
+                * see if page is already in the process of
+                * being cleaned... if so, leave it alone
+                */
+               if (!p->laundry) {
+
+                       if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+
+                               if (VM_PAGE_Q_THROTTLED(iq)) {
+                                       
+                                       iq->pgo_draining = TRUE;
+                                       
+                                       assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
+                                       vm_page_unlock_queues();
+                                       vm_object_unlock(object);
+                                       
+                                       thread_block(THREAD_CONTINUE_NULL);
+
+                                       vm_object_lock(object);
+                                       goto ReScan;
+                               }
+
+                               if (p->fictitious || p->busy ) {
+                                       vm_page_unlock_queues();
+                                       continue;
+                               }
+                               
+                               if (p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) {
+                                       vm_page_unlock_queues();
+                                       continue;
+                               }
+                               
+                               if (p->cleaning) {
+                                       p->pageout = TRUE;
+                                       vm_page_unlock_queues();
+                                       continue;
+                               }
+
+                               if (p->pmapped == TRUE) {
+                                       int refmod_state;
+                                       refmod_state = pmap_disconnect_options(p->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
+                                       if (refmod_state & VM_MEM_MODIFIED) {
+                                               SET_PAGE_DIRTY(p, FALSE);
+                                       }
+                               }
+                               
+                               if (p->dirty == FALSE) {
+                                       vm_page_unlock_queues();
+                                       VM_PAGE_FREE(p);
+                                       continue;
+                               }
+                       }
+
+                       VM_PAGE_QUEUES_REMOVE(p);
+                       vm_pageout_cluster(p, TRUE);
+               }
+               vm_page_unlock_queues();
+       }
+
+       vm_object_unlock(object);
+}
+
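+/*
+ *     vm_object_pagein:
+ *
+ *     Ask the object's pager to reclaim the object's paged-out data via
+ *     memory_object_data_reclaim(), blocking other access to the object
+ *     while the reclaim is in progress.
+ */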
+kern_return_t
+vm_object_pagein(
+       vm_object_t object)
+{
+       memory_object_t pager;
+       kern_return_t   kr;
+
+       vm_object_lock(object);
+
+       pager = object->pager;
+
+       if (!object->pager_ready || pager == MEMORY_OBJECT_NULL) {
+               vm_object_unlock(object);
+               return KERN_FAILURE;
+       }
+       
+       vm_object_paging_wait(object, THREAD_UNINT);
+       vm_object_paging_begin(object);
+
+       object->blocked_access = TRUE;
+       vm_object_unlock(object);
+       
+       kr = memory_object_data_reclaim(pager, TRUE);
+
+       vm_object_lock(object);
+
+       object->blocked_access = FALSE;
+       vm_object_paging_end(object);
+
+       vm_object_unlock(object);
+       
+       return kr;
+}
+#endif /* CONFIG_FREEZE */