diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c
index 9552295d8867da7d949e04b96b15894becccd480..9d81f507048eaf5d07f07b0a39adc7c19e42a4e1 100644
--- a/osfmk/vm/vm_resident.c
+++ b/osfmk/vm/vm_resident.c
@@ -76,6 +76,7 @@
 #include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/xpr.h>
+#include <kern/ledger.h>
 #include <vm/pmap.h>
 #include <vm/vm_init.h>
 #include <vm/vm_map.h>
 #include <kern/misc_protos.h>
 #include <zone_debug.h>
 #include <vm/cpm.h>
-#include <ppc/mappings.h>              /* (BRINGUP) */
-#include <pexpert/pexpert.h>   /* (BRINGUP) */
+#include <pexpert/pexpert.h>
 
 #include <vm/vm_protos.h>
 #include <vm/memory_object.h>
 #include <vm/vm_purgeable_internal.h>
+#include <vm/vm_compressor.h>
 
-#include <IOKit/IOHibernatePrivate.h>
-
-
-#if CONFIG_EMBEDDED
-#include <sys/kern_memorystatus.h>
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
 #endif
 
+#include <IOKit/IOHibernatePrivate.h>
+
 #include <sys/kdebug.h>
 
+boolean_t      hibernate_cleaning_in_progress = FALSE;
 boolean_t      vm_page_free_verify = TRUE;
 
-int                            speculative_age_index = 0;
-int                            speculative_steal_index = 0;
+uint32_t       vm_lopage_free_count = 0;
+uint32_t       vm_lopage_free_limit = 0;
+uint32_t       vm_lopage_lowater    = 0;
+boolean_t      vm_lopage_refill = FALSE;
+boolean_t      vm_lopage_needed = FALSE;
+
 lck_mtx_ext_t  vm_page_queue_lock_ext;
 lck_mtx_ext_t  vm_page_queue_free_lock_ext;
 lck_mtx_ext_t  vm_purgeable_queue_lock_ext;
 
+int            speculative_age_index = 0;
+int            speculative_steal_index = 0;
 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
 
 
 __private_extern__ void                vm_page_init_lck_grp(void);
 
-static void                    vm_page_free_prepare(vm_page_t  page);
+static void            vm_page_free_prepare(vm_page_t  page);
+static vm_page_t       vm_page_grab_fictitious_common(ppnum_t phys_addr);
+
 
 
 
@@ -131,7 +140,7 @@ static void                 vm_page_free_prepare(vm_page_t  page);
 
 vm_offset_t virtual_space_start;
 vm_offset_t virtual_space_end;
-int    vm_page_pages;
+uint32_t       vm_page_pages;
 
 /*
  *     The vm_page_lookup() routine, which provides for fast
@@ -142,7 +151,7 @@ int vm_page_pages;
  *     or VP, table.]
  */
 typedef struct {
-       vm_page_t       pages;
+       vm_page_packed_t page_list;
 #if    MACH_PAGE_HASH_STATS
        int             cur_count;              /* current count */
        int             hi_count;               /* high water mark */
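
The bucket head is now a vm_page_packed_t (a 32-bit packed pointer) rather than a full vm_page_t, halving the hash table's footprint on LP64. As a hedged sketch of how a chain is walked (hypothetical helper, not part of this change; compare the updated vm_page_lookup() later in this diff), each link is expanded with VM_PAGE_UNPACK_PTR:

    /*
     * Hedged sketch: walk a hash chain whose links are packed
     * 32-bit values.  VM_PAGE_UNPACK_PTR expands a vm_page_packed_t
     * back into a vm_page_t.
     */
    static vm_page_t
    hash_chain_lookup_sketch(vm_page_bucket_t *bucket,
                             vm_object_t object, vm_object_offset_t offset)
    {
            vm_page_t mem;

            for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list);
                 mem != VM_PAGE_NULL;
                 mem = VM_PAGE_UNPACK_PTR(mem->next_m)) {
                    if (mem->object == object && mem->offset == offset)
                            break;
            }
            return mem;
    }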
@@ -161,6 +170,13 @@ unsigned int       vm_page_bucket_lock_count = 0;          /* How big is array of locks? */
 
 lck_spin_t     *vm_page_bucket_locks;
 
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets;        /* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
 #if    MACH_PAGE_HASH_STATS
 /* This routine is only for debug.  It is intended to be called by
@@ -224,6 +240,7 @@ struct vm_page      vm_page_template;
 
 vm_page_t      vm_pages = VM_PAGE_NULL;
 unsigned int   vm_pages_count = 0;
+ppnum_t                vm_page_lowest = 0;
 
 /*
  *     Resident pages that represent real memory
@@ -233,15 +250,13 @@ unsigned int      vm_pages_count = 0;
 unsigned int   vm_colors;
 unsigned int    vm_color_mask;                 /* mask is == (vm_colors-1) */
 unsigned int   vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
+unsigned int   vm_free_magazine_refill_limit = 0;
 queue_head_t   vm_page_queue_free[MAX_COLORS];
-vm_page_t       vm_page_queue_fictitious;
 unsigned int   vm_page_free_wanted;
 unsigned int   vm_page_free_wanted_privileged;
 unsigned int   vm_page_free_count;
 unsigned int   vm_page_fictitious_count;
 
-unsigned int   vm_page_free_count_minimum;     /* debugging */
-
 /*
  *     Occasionally, the virtual memory system uses
  *     resident page structures that do not refer to
@@ -254,6 +269,8 @@ unsigned int        vm_page_free_count_minimum;     /* debugging */
 zone_t vm_page_zone;
 vm_locks_array_t vm_page_locks;
 decl_lck_mtx_data(,vm_page_alloc_lock)
+lck_mtx_ext_t vm_page_alloc_lock_ext;
+
 unsigned int io_throttle_zero_fill;
 
 unsigned int   vm_page_local_q_count = 0;
@@ -261,6 +278,9 @@ unsigned int        vm_page_local_q_soft_limit = 250;
 unsigned int   vm_page_local_q_hard_limit = 500;
 struct vplq     *vm_page_local_q = NULL;
 
+/* N.B. Guard and fictitious pages must not
+ * be assigned a zero phys_page value.
+ */
 /*
  *     Fictitious pages don't have a physical address,
  *     but we must initialize phys_page to something.
@@ -285,38 +305,50 @@ ppnum_t vm_page_guard_addr = (ppnum_t) -2;
  *     system (pageout daemon).  These queues are
  *     defined here, but are shared by the pageout
  *     module.  The inactive queue is broken into 
- *     inactive and zf for convenience as the 
+ *     file-backed and anonymous for convenience, as the 
 *     pageout daemon often assigns a higher 
- *     affinity to zf pages
+ *     importance to anonymous pages (so they are less likely to be picked)
  */
 queue_head_t   vm_page_queue_active;
 queue_head_t   vm_page_queue_inactive;
-queue_head_t   vm_page_queue_zf;       /* inactive memory queue for zero fill */
+queue_head_t   vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
 queue_head_t   vm_page_queue_throttled;
 
 unsigned int   vm_page_active_count;
 unsigned int   vm_page_inactive_count;
+unsigned int   vm_page_anonymous_count;
 unsigned int   vm_page_throttled_count;
 unsigned int   vm_page_speculative_count;
 unsigned int   vm_page_wire_count;
+unsigned int   vm_page_wire_count_initial;
 unsigned int   vm_page_gobble_count = 0;
-unsigned int   vm_page_wire_count_warning = 0;
-unsigned int   vm_page_gobble_count_warning = 0;
+
+#define        VM_PAGE_WIRE_COUNT_WARNING      0
+#define VM_PAGE_GOBBLE_COUNT_WARNING   0
 
 unsigned int   vm_page_purgeable_count = 0; /* # of pages purgeable now */
 unsigned int   vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
 uint64_t       vm_page_purged_count = 0;    /* total count of purged pages */
 
+unsigned int   vm_page_xpmapped_external_count = 0;
+unsigned int   vm_page_external_count = 0;
+unsigned int   vm_page_internal_count = 0;
+unsigned int   vm_page_pageable_external_count = 0;
+unsigned int   vm_page_pageable_internal_count = 0;
+
 #if DEVELOPMENT || DEBUG
 unsigned int   vm_page_speculative_recreated = 0;
 unsigned int   vm_page_speculative_created = 0;
 unsigned int   vm_page_speculative_used = 0;
 #endif
 
-ppnum_t                vm_lopage_poolstart = 0;
-ppnum_t                vm_lopage_poolend = 0;
-int            vm_lopage_poolsize = 0;
+queue_head_t    vm_page_queue_cleaned;
+
+unsigned int   vm_page_cleaned_count = 0;
+unsigned int   vm_pageout_enqueued_cleaned = 0;
+
 uint64_t       max_valid_dma_address = 0xffffffffffffffffULL;
+ppnum_t                max_valid_low_ppnum = 0xffffffff;
 
 
 /*
@@ -328,12 +360,13 @@ uint64_t  max_valid_dma_address = 0xffffffffffffffffULL;
 unsigned int   vm_page_free_target = 0;
 unsigned int   vm_page_free_min = 0;
 unsigned int   vm_page_throttle_limit = 0;
-uint32_t       vm_page_creation_throttle = 0;
 unsigned int   vm_page_inactive_target = 0;
+unsigned int   vm_page_anonymous_min = 0;
 unsigned int   vm_page_inactive_min = 0;
 unsigned int   vm_page_free_reserved = 0;
 unsigned int   vm_page_throttle_count = 0;
 
+
 /*
  *     The VM system has a couple of heuristics for deciding
  *     that pages are "uninteresting" and should be placed
@@ -358,7 +391,9 @@ struct vm_page_stats_reusable vm_page_stats_reusable;
 void
 vm_set_page_size(void)
 {
-       page_mask = page_size - 1;
+       page_size  = PAGE_SIZE;
+       page_mask  = PAGE_MASK;
+       page_shift = PAGE_SHIFT;
 
        if ((page_mask & page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
@@ -368,6 +403,8 @@ vm_set_page_size(void)
                        break;
 }
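
The power-of-two check above relies on page_mask being page_size - 1: a power of two has exactly one set bit, so ANDing it with its predecessor yields zero. A minimal standalone illustration (hypothetical helper, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    /* True iff size is a nonzero power of two (mask == size - 1). */
    static bool
    page_size_is_pow2(uintptr_t size)
    {
            uintptr_t mask = size - 1;
            return size != 0 && (mask & size) == 0;
    }
    /* page_size_is_pow2(4096) -> true; page_size_is_pow2(4095) -> false */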
 
+#define COLOR_GROUPS_TO_STEAL  4
+
 
 /* Called once during startup, once the cache geometry is known.
  */
@@ -393,6 +430,8 @@ vm_page_set_colors( void )
        
        vm_colors = n;
        vm_color_mask = n - 1;
+
+       vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
 }
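
Worked example: with vm_colors == 32 and the COLOR_GROUPS_TO_STEAL of 4 defined just above, vm_free_magazine_refill_limit comes out to 32 * 4 = 128 pages, so each refill of a per-CPU free-page magazine steals up to 128 pages from the global free queues (see the reworked pages_to_steal logic in vm_page_grab() later in this diff).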
 
 
@@ -420,6 +459,9 @@ vm_page_init_lck_grp(void)
        lck_grp_init(&vm_page_lck_grp_alloc, "vm_page_alloc", &vm_page_lck_grp_attr);
        lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr);
        lck_attr_setdefault(&vm_page_lck_attr);
+       lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
+
+       vm_compressor_init_locks();
 }
 
 void
@@ -444,6 +486,8 @@ vm_page_init_local_q()
                        VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
                        queue_init(&lq->vpl_queue);
                        lq->vpl_count = 0;
+                       lq->vpl_internal_count = 0;
+                       lq->vpl_external_count = 0;
                }
                vm_page_local_q_count = num_cpus;
 
@@ -485,7 +529,7 @@ vm_page_bootstrap(
        m->pageq.prev = NULL;
        m->listq.next = NULL;
        m->listq.prev = NULL;
-       m->next = VM_PAGE_NULL;
+       m->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 
        m->object = VM_OBJECT_NULL;             /* reset later */
        m->offset = (vm_object_offset_t) -1;    /* reset later */
@@ -509,6 +553,7 @@ vm_page_bootstrap(
        m->busy = TRUE;
        m->wanted = FALSE;
        m->tabled = FALSE;
+       m->hashed = FALSE;
        m->fictitious = FALSE;
        m->pmapped = FALSE;
        m->wpmapped = FALSE;
@@ -524,16 +569,16 @@ vm_page_bootstrap(
        m->unusual = FALSE;
        m->encrypted = FALSE;
        m->encrypted_cleaning = FALSE;
-       m->list_req_pending = FALSE;
-       m->dump_cleaning = FALSE;
        m->cs_validated = FALSE;
        m->cs_tainted = FALSE;
        m->no_cache = FALSE;
-       m->zero_fill = FALSE;
        m->reusable = FALSE;
+       m->slid = FALSE;
+       m->xpmapped = FALSE;
+       m->compressor = FALSE;
+       m->written_by_kernel = FALSE;
        m->__unused_object_bits = 0;
 
-
        /*
         *      Initialize the page queues.
         */
@@ -558,15 +603,18 @@ vm_page_bootstrap(
                purgeable_queues[i].debug_count_objects = 0;
 #endif
        };
+       purgeable_nonvolatile_count = 0;
+       queue_init(&purgeable_nonvolatile_queue);
     
        for (i = 0; i < MAX_COLORS; i++ )
                queue_init(&vm_page_queue_free[i]);
+
        queue_init(&vm_lopage_queue_free);
-       vm_page_queue_fictitious = VM_PAGE_NULL;
        queue_init(&vm_page_queue_active);
        queue_init(&vm_page_queue_inactive);
+       queue_init(&vm_page_queue_cleaned);
        queue_init(&vm_page_queue_throttled);
-       queue_init(&vm_page_queue_zf);
+       queue_init(&vm_page_queue_anonymous);
 
        for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
                queue_init(&vm_page_queue_speculative[i].age_q);
@@ -583,9 +631,10 @@ vm_page_bootstrap(
        /*
         *      Steal memory for the map and zone subsystems.
         */
-
-       vm_map_steal_memory();
+       kernel_debug_string("zone_steal_memory");
        zone_steal_memory();
+       kernel_debug_string("vm_map_steal_memory");
+       vm_map_steal_memory();
 
        /*
         *      Allocate (and initialize) the virtual-to-physical
@@ -630,10 +679,36 @@ vm_page_bootstrap(
        if (vm_page_hash_mask & vm_page_bucket_count)
                printf("vm_page_bootstrap: WARNING -- strange page hash\n");
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+       /*
+        * Allocate a decoy set of page buckets, to detect
+        * any stomping there.
+        */
+       vm_page_fake_buckets = (vm_page_bucket_t *)
+               pmap_steal_memory(vm_page_bucket_count *
+                                 sizeof(vm_page_bucket_t));
+       vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+       vm_page_fake_buckets_end =
+               vm_map_round_page((vm_page_fake_buckets_start +
+                                  (vm_page_bucket_count *
+                                   sizeof (vm_page_bucket_t))),
+                                 PAGE_MASK);
+       char *cp;
+       for (cp = (char *)vm_page_fake_buckets_start;
+            cp < (char *)vm_page_fake_buckets_end;
+            cp++) {
+               *cp = 0x5a;
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
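
The decoy buckets sit in stolen memory and are seeded with the 0x5a pattern so that stray writes aimed at the real buckets can be caught. A hedged sketch of the kind of scan such a check performs (hypothetical helper; the actual validation lives behind VM_PAGE_BUCKETS_CHECK elsewhere in this file, not in this hunk):

    /*
     * Hedged sketch: any byte in the decoy range that no longer
     * holds the 0x5a fill implies a stray write ("stomping").
     */
    static void
    vm_page_fake_buckets_scan_sketch(void)
    {
            char *cp;

            for (cp = (char *)vm_page_fake_buckets_start;
                 cp < (char *)vm_page_fake_buckets_end;
                 cp++) {
                    if (*cp != 0x5a)
                            panic("fake buckets stomped at %p (0x%x)",
                                  cp, (unsigned char)*cp);
            }
    }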
+
+       kernel_debug_string("vm_page_buckets");
        vm_page_buckets = (vm_page_bucket_t *)
                pmap_steal_memory(vm_page_bucket_count *
                                  sizeof(vm_page_bucket_t));
 
+       kernel_debug_string("vm_page_bucket_locks");
        vm_page_bucket_locks = (lck_spin_t *)
                pmap_steal_memory(vm_page_bucket_lock_count *
                                  sizeof(lck_spin_t));
@@ -641,7 +716,7 @@ vm_page_bootstrap(
        for (i = 0; i < vm_page_bucket_count; i++) {
                register vm_page_bucket_t *bucket = &vm_page_buckets[i];
 
-               bucket->pages = VM_PAGE_NULL;
+               bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 #if     MACH_PAGE_HASH_STATS
                bucket->cur_count = 0;
                bucket->hi_count = 0;
@@ -651,6 +726,10 @@ vm_page_bootstrap(
        for (i = 0; i < vm_page_bucket_lock_count; i++)
                lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
 
+#if VM_PAGE_BUCKETS_CHECK
+       vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
        /*
         *      Machine-dependent code allocates the resident page table.
         *      It uses vm_page_init to initialize the page frames.
@@ -659,6 +738,7 @@ vm_page_bootstrap(
         *      to get the alignment right.
         */
 
+       kernel_debug_string("pmap_startup");
        pmap_startup(&virtual_space_start, &virtual_space_end);
        virtual_space_start = round_page(virtual_space_start);
        virtual_space_end = trunc_page(virtual_space_end);
@@ -674,12 +754,13 @@ vm_page_bootstrap(
         *      all VM managed pages are "free", courtesy of pmap_startup.
         */
        assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
-       vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count;    /* initial value */
-       vm_page_free_count_minimum = vm_page_free_count;
+       vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count;     /* initial value */
+       vm_page_wire_count_initial = vm_page_wire_count;
 
        printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
               vm_page_free_count, vm_page_wire_count);
 
+       kernel_debug_string("vm_page_bootstrap complete");
        simple_lock_init(&vm_paging_lock, 0);
 }
 
@@ -726,7 +807,7 @@ pmap_steal_memory(
        addr = virtual_space_start;
        virtual_space_start += size;
 
-       kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);   /* (TEST/DEBUG) */
+       //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */
 
        /*
         *      Allocate and map physical pages to back new virtual pages.
@@ -735,12 +816,8 @@ pmap_steal_memory(
        for (vaddr = round_page(addr);
             vaddr < addr + size;
             vaddr += PAGE_SIZE) {
-#if defined(__LP64__)
-               if (!pmap_next_page_k64(&phys_page))
-#else
-               if (!pmap_next_page(&phys_page))
-#endif
 
+               if (!pmap_next_page_hi(&phys_page))
                        panic("pmap_steal_memory");
 
                /*
@@ -752,7 +829,7 @@ pmap_steal_memory(
 #endif
 
                pmap_enter(kernel_pmap, vaddr, phys_page,
-                          VM_PROT_READ|VM_PROT_WRITE, 
+                          VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE,
                                VM_WIMG_USE_DEFAULT, FALSE);
                /*
                 * Account for newly stolen memory
@@ -764,6 +841,7 @@ pmap_steal_memory(
        return (void *) addr;
 }
 
+void vm_page_release_startup(vm_page_t mem);
 void
 pmap_startup(
        vm_offset_t *startp,
@@ -772,8 +850,22 @@ pmap_startup(
        unsigned int i, npages, pages_initialized, fill, fillval;
        ppnum_t         phys_page;
        addr64_t        tmpaddr;
-       unsigned int    num_of_lopages = 0;
-       unsigned int    last_index;
+
+
+#if    defined(__LP64__)
+       /*
+        * struct vm_page must be of size 64 due to VM_PAGE_PACK_PTR use
+        */
+       assert(sizeof(struct vm_page) == 64);
+
+       /*
+        * make sure we are aligned on a 64 byte boundary
+        * for VM_PAGE_PACK_PTR (it clips off the low-order
+        * 6 bits of the pointer)
+        */
+       if (virtual_space_start != virtual_space_end)
+               virtual_space_start = round_page(virtual_space_start);
+#endif
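
Those two constraints (64-byte element size, 64-byte alignment) are what let VM_PAGE_PACK_PTR shave a vm_page pointer down to 32 bits. A hedged sketch of the arithmetic with hypothetical names (the real macros choose a different base and encoding; here the slot at pack_base itself is reserved so that a packed value of 0 can stand for VM_PAGE_NULL):

    typedef uint32_t page_packed_sketch_t;
    static uintptr_t pack_base;     /* set once at startup, 64-byte aligned */

    static page_packed_sketch_t
    page_pack_sketch(void *p)
    {
            if (p == NULL)
                    return 0;
            /* the low-order 6 bits are always zero, so clip them off */
            return (page_packed_sketch_t)(((uintptr_t)p - pack_base) >> 6);
    }

    static void *
    page_unpack_sketch(page_packed_sketch_t packed)
    {
            if (packed == 0)
                    return NULL;
            return (void *)(pack_base + ((uintptr_t)packed << 6));
    }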
 
        /*
         *      We calculate how many page frames we will have
@@ -789,64 +881,51 @@ pmap_startup(
        /*
         *      Initialize the page frames.
         */
+       kernel_debug_string("Initialize the page frames");
        for (i = 0, pages_initialized = 0; i < npages; i++) {
                if (!pmap_next_page(&phys_page))
                        break;
+               if (pages_initialized == 0 || phys_page < vm_page_lowest)
+                       vm_page_lowest = phys_page;
 
-               vm_page_init(&vm_pages[i], phys_page);
+               vm_page_init(&vm_pages[i], phys_page, FALSE);
                vm_page_pages++;
                pages_initialized++;
        }
        vm_pages_count = pages_initialized;
 
+#if    defined(__LP64__)
+
+       if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0])) != &vm_pages[0])
+               panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+
+       if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1])) != &vm_pages[vm_pages_count-1])
+               panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
+       kernel_debug_string("page fill/release");
        /*
         * Check if we want to initialize pages to a known value
         */
        fill = 0;                                                               /* Assume no fill */
        if (PE_parse_boot_argn("fill", &fillval, sizeof (fillval))) fill = 1;                   /* Set fill */
-       
-
-       /*
-        * if vm_lopage_poolsize is non-zero, than we need to reserve
-        * a pool of pages whose addresess are less than 4G... this pool
-        * is used by drivers whose hardware can't DMA beyond 32 bits...
-        *
-        * note that I'm assuming that the page list is ascending and
-        * ordered w/r to the physical address
+#if    DEBUG
+       /* This slows down booting the DEBUG kernel, particularly on
+        * large memory systems, but is worthwhile for deterministically
+        * trapping use of uninitialized memory.
         */
-       for (i = 0, num_of_lopages = vm_lopage_poolsize; num_of_lopages && i < pages_initialized; num_of_lopages--, i++) {
-               vm_page_t m;
-
-               m = &vm_pages[i];
-
-               if (m->phys_page >= (1 << (32 - PAGE_SHIFT)))
-                       panic("couldn't reserve the lopage pool: not enough lo pages\n");
-
-               if (m->phys_page < vm_lopage_poolend)
-                       panic("couldn't reserve the lopage pool: page list out of order\n");
-
-               vm_lopage_poolend = m->phys_page;
-
-               if (vm_lopage_poolstart == 0)
-                       vm_lopage_poolstart = m->phys_page;
-               else {
-                       if (m->phys_page < vm_lopage_poolstart)
-                               panic("couldn't reserve the lopage pool: page list out of order\n");
-               }
-
-               if (fill)
-                       fillPage(m->phys_page, fillval);                /* Fill the page with a know value if requested at boot */                      
-
-               vm_page_release(m);
-       } 
-       last_index = i;
-
+       if (fill == 0) {
+               fill = 1;
+               fillval = 0xDEB8F177;
+       }
+#endif
+       if (fill)
+               kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
        // -debug code remove
        if (2 == vm_himemory_mode) {
                // free low -> high so high is preferred
-               for (i = last_index + 1; i <= pages_initialized; i++) {
+               for (i = 1; i <= pages_initialized; i++) {
                        if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);          /* Fill the page with a known value if requested at boot */                     
-                       vm_page_release(&vm_pages[i - 1]);
+                       vm_page_release_startup(&vm_pages[i - 1]);
                }
        }
        else
@@ -858,11 +937,13 @@ pmap_startup(
         * the devices (which must address physical memory) happy if
         * they require several consecutive pages.
         */
-       for (i = pages_initialized; i > last_index; i--) {
+       for (i = pages_initialized; i > 0; i--) {
                if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);          /* Fill the page with a known value if requested at boot */                     
-               vm_page_release(&vm_pages[i - 1]);
+               vm_page_release_startup(&vm_pages[i - 1]);
        }
 
+       VM_CHECK_MEMORYSTATUS;
+       
 #if 0
        {
                vm_page_t xx, xxo, xxl;
@@ -933,18 +1014,18 @@ vm_page_module_init(void)
        zone_debug_disable(vm_page_zone);
 #endif /* ZONE_DEBUG */
 
+       zone_change(vm_page_zone, Z_CALLERACCT, FALSE);
        zone_change(vm_page_zone, Z_EXPAND, FALSE);
        zone_change(vm_page_zone, Z_EXHAUST, TRUE);
        zone_change(vm_page_zone, Z_FOREIGN, TRUE);
-
+       zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
         /*
          * Adjust zone statistics to account for the real pages allocated
          * in vm_page_create(). [Q: is this really what we want?]
          */
         vm_page_zone->count += vm_page_pages;
+        vm_page_zone->sum_count += vm_page_pages;
         vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
-
-       lck_mtx_init(&vm_page_alloc_lock, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
 }
 
 /*
@@ -967,11 +1048,13 @@ vm_page_create(
        for (phys_page = start;
             phys_page < end;
             phys_page++) {
-               while ((m = (vm_page_t) vm_page_grab_fictitious())
+               while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page))
                        == VM_PAGE_NULL)
                        vm_page_more_fictitious();
 
-               vm_page_init(m, phys_page);
+               m->fictitious = FALSE;
+               pmap_clear_noencrypt(phys_page);
+
                vm_page_pages++;
                vm_page_release(m);
        }
@@ -1003,7 +1086,7 @@ vm_page_insert(
        vm_object_t             object,
        vm_object_offset_t      offset)
 {
-       vm_page_insert_internal(mem, object, offset, FALSE, TRUE);
+       vm_page_insert_internal(mem, object, offset, FALSE, TRUE, FALSE);
 }
 
 void
@@ -1012,22 +1095,29 @@ vm_page_insert_internal(
        vm_object_t             object,
        vm_object_offset_t      offset,
        boolean_t               queues_lock_held,
-       boolean_t               insert_in_hash)
+       boolean_t               insert_in_hash,
+       boolean_t               batch_pmap_op)
 {
-       vm_page_bucket_t *bucket;
-       lck_spin_t      *bucket_lock;
-       int     hash_id;
+       vm_page_bucket_t        *bucket;
+       lck_spin_t              *bucket_lock;
+       int                     hash_id;
+       task_t                  owner;
 
         XPR(XPR_VM_PAGE,
                 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
                 object, offset, mem, 0,0);
-
+#if 0
+       /*
+        * we may not hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(mem);
+#endif
 
-       if (object == vm_submap_object) {
-               /* the vm_submap_object is only a placeholder for submaps */
-               panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset);
-       }
+       assert(page_aligned(offset));
+
+       /* the vm_submap_object is only a placeholder for submaps */
+       assert(object != vm_submap_object);
 
        vm_object_lock_assert_exclusive(object);
 #if DEBUG
@@ -1037,13 +1127,13 @@ vm_page_insert_internal(
 #endif /* DEBUG */
        
        if (insert_in_hash == TRUE) {
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
                if (mem->tabled || mem->object != VM_OBJECT_NULL)
                        panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
                              "already in (obj=%p,off=0x%llx)",
                              mem, object, offset, mem->object, mem->offset);
 #endif
-               assert(!object->internal || offset < object->size);
+               assert(!object->internal || offset < object->vo_size);
 
                /* only insert "pageout" pages into "pageout" objects,
                 * and normal pages into normal objects */
@@ -1067,19 +1157,30 @@ vm_page_insert_internal(
        
                lck_spin_lock(bucket_lock);
 
-               mem->next = bucket->pages;
-               bucket->pages = mem;
+               mem->next_m = bucket->page_list;
+               bucket->page_list = VM_PAGE_PACK_PTR(mem);
+               assert(mem == VM_PAGE_UNPACK_PTR(bucket->page_list));
+
 #if     MACH_PAGE_HASH_STATS
                if (++bucket->cur_count > bucket->hi_count)
                        bucket->hi_count = bucket->cur_count;
 #endif /* MACH_PAGE_HASH_STATS */
-
+               mem->hashed = TRUE;
                lck_spin_unlock(bucket_lock);
        }
+
+       {       
+               unsigned int    cache_attr;
+
+               cache_attr = object->wimg_bits & VM_WIMG_MASK;
+
+               if (cache_attr != VM_WIMG_USE_DEFAULT) {
+                       PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
+               }
+       }
        /*
         *      Now link into the object's list of backed pages.
         */
-
        VM_PAGE_INSERT(mem, object);
        mem->tabled = TRUE;
 
@@ -1093,13 +1194,61 @@ vm_page_insert_internal(
        }
        assert(object->resident_page_count >= object->wired_page_count);
 
+       if (object->internal) {
+               OSAddAtomic(1, &vm_page_internal_count);
+       } else {
+               OSAddAtomic(1, &vm_page_external_count);
+       }
+
+       /*
+        * It wouldn't make sense to insert a "reusable" page in
+        * an object (the page would have been marked "reusable" only
+        * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
+        * in the object at that time).
+        * But a page could be inserted in a "all_reusable" object, if
+        * something faults it in (a vm_read() from another task or a
+        * "use-after-free" issue in user space, for example).  It can
+        * also happen if we're relocating a page from that object to
+        * a different physical page during a physically-contiguous
+        * allocation.
+        */
        assert(!mem->reusable);
+       if (mem->object->all_reusable) {
+               OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
+       }
+
+       if (object->purgable == VM_PURGABLE_DENY) {
+               owner = TASK_NULL;
+       } else {
+               owner = object->vo_purgeable_owner;
+       }
+       if (owner &&
+           (object->purgable == VM_PURGABLE_NONVOLATILE ||
+            VM_PAGE_WIRED(mem))) {
+               /* more non-volatile bytes */
+               ledger_credit(owner->ledger,
+                             task_ledgers.purgeable_nonvolatile,
+                             PAGE_SIZE);
+               /* more footprint */
+               ledger_credit(owner->ledger,
+                             task_ledgers.phys_footprint,
+                             PAGE_SIZE);
+
+       } else if (owner &&
+                  (object->purgable == VM_PURGABLE_VOLATILE ||
+                   object->purgable == VM_PURGABLE_EMPTY)) {
+               assert(! VM_PAGE_WIRED(mem));
+               /* more volatile bytes */
+               ledger_credit(owner->ledger,
+                             task_ledgers.purgeable_volatile,
+                             PAGE_SIZE);
+       }
 
        if (object->purgable == VM_PURGABLE_VOLATILE) {
                if (VM_PAGE_WIRED(mem)) {
-                       OSAddAtomic(1, &vm_page_purgeable_wired_count);
+                       OSAddAtomic(+1, &vm_page_purgeable_wired_count);
                } else {
-                       OSAddAtomic(1, &vm_page_purgeable_count);
+                       OSAddAtomic(+1, &vm_page_purgeable_count);
                }
        } else if (object->purgable == VM_PURGABLE_EMPTY &&
                   mem->throttled) {
@@ -1117,6 +1266,25 @@ vm_page_insert_internal(
                if (queues_lock_held == FALSE)
                        vm_page_unlock_queues();
        }
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+       if (vm_object_tracking_inited &&
+           object->internal &&
+           object->resident_page_count == 0 &&
+           object->pager == NULL &&
+           object->shadow != NULL &&
+           object->shadow->copy == object) {
+               void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+               int numsaved = 0;
+
+               numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+               btlog_add_entry(vm_object_tracking_btlog,
+                               object,
+                               VM_OBJECT_TRACKING_OP_MODIFIED,
+                               bt,
+                               numsaved);
+       }
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
 }
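
The new ledger accounting above follows one rule: each resident page of a purgeable-owned object is charged to exactly one of the owner's purgeable ledgers, and only nonvolatile (or wired) pages also count toward phys_footprint; vm_page_remove() below debits the same amounts symmetrically. A hedged restatement of the credit side as a standalone helper (names taken from this diff, the helper itself is hypothetical):

    /* Hedged sketch of the crediting rule applied above. */
    static void
    purgeable_credit_sketch(task_t owner, vm_object_t object,
                            boolean_t wired)
    {
            if (owner == TASK_NULL)
                    return;
            if (object->purgable == VM_PURGABLE_NONVOLATILE || wired) {
                    /* more non-volatile bytes, and more footprint */
                    ledger_credit(owner->ledger,
                                  task_ledgers.purgeable_nonvolatile,
                                  PAGE_SIZE);
                    ledger_credit(owner->ledger,
                                  task_ledgers.phys_footprint, PAGE_SIZE);
            } else if (object->purgable == VM_PURGABLE_VOLATILE ||
                       object->purgable == VM_PURGABLE_EMPTY) {
                    /* more volatile bytes, no footprint charge */
                    ledger_credit(owner->ledger,
                                  task_ledgers.purgeable_volatile,
                                  PAGE_SIZE);
            }
    }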
 
 /*
@@ -1138,9 +1306,15 @@ vm_page_replace(
        lck_spin_t      *bucket_lock;
        int             hash_id;
 
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(mem);
+#endif
        vm_object_lock_assert_exclusive(object);
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
        if (mem->tabled || mem->object != VM_OBJECT_NULL)
                panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
                      "already in (obj=%p,off=0x%llx)",
@@ -1165,31 +1339,33 @@ vm_page_replace(
 
        lck_spin_lock(bucket_lock);
 
-       if (bucket->pages) {
-               vm_page_t *mp = &bucket->pages;
-               vm_page_t m = *mp;
+       if (bucket->page_list) {
+               vm_page_packed_t *mp = &bucket->page_list;
+               vm_page_t m = VM_PAGE_UNPACK_PTR(*mp);
 
                do {
                        if (m->object == object && m->offset == offset) {
                                /*
                                 * Remove old page from hash list
                                 */
-                               *mp = m->next;
+                               *mp = m->next_m;
+                               m->hashed = FALSE;
 
                                found_m = m;
                                break;
                        }
-                       mp = &m->next;
-               } while ((m = *mp));
+                       mp = &m->next_m;
+               } while ((m = VM_PAGE_UNPACK_PTR(*mp)));
 
-               mem->next = bucket->pages;
+               mem->next_m = bucket->page_list;
        } else {
-               mem->next = VM_PAGE_NULL;
+               mem->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
        }
        /*
         * insert new page at head of hash list
         */
-       bucket->pages = mem;
+       bucket->page_list = VM_PAGE_PACK_PTR(mem);
+       mem->hashed = TRUE;
 
        lck_spin_unlock(bucket_lock);
 
@@ -1201,7 +1377,7 @@ vm_page_replace(
                 */
                vm_page_free_unlocked(found_m, FALSE);
        }
-       vm_page_insert_internal(mem, object, offset, FALSE, FALSE);
+       vm_page_insert_internal(mem, object, offset, FALSE, FALSE, FALSE);
 }
 
 /*
@@ -1222,6 +1398,7 @@ vm_page_remove(
        vm_page_t       this;
        lck_spin_t      *bucket_lock;
        int             hash_id;
+       task_t          owner;
 
         XPR(XPR_VM_PAGE,
                 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
@@ -1231,8 +1408,14 @@ vm_page_remove(
        vm_object_lock_assert_exclusive(mem->object);
        assert(mem->tabled);
        assert(!mem->cleaning);
+       assert(!mem->laundry);
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(mem);
-
+#endif
        if (remove_from_hash == TRUE) {
                /*
                 *      Remove from the object_object/offset hash table
@@ -1243,23 +1426,23 @@ vm_page_remove(
 
                lck_spin_lock(bucket_lock);
 
-               if ((this = bucket->pages) == mem) {
+               if ((this = VM_PAGE_UNPACK_PTR(bucket->page_list)) == mem) {
                        /* optimize for common case */
 
-                       bucket->pages = mem->next;
+                       bucket->page_list = mem->next_m;
                } else {
-                       vm_page_t       *prev;
+                       vm_page_packed_t        *prev;
 
-                       for (prev = &this->next;
-                            (this = *prev) != mem;
-                            prev = &this->next)
+                       for (prev = &this->next_m;
+                            (this = VM_PAGE_UNPACK_PTR(*prev)) != mem;
+                            prev = &this->next_m)
                                continue;
-                       *prev = this->next;
+                       *prev = this->next_m;
                }
 #if     MACH_PAGE_HASH_STATS
                bucket->cur_count--;
 #endif /* MACH_PAGE_HASH_STATS */
-
+               mem->hashed = FALSE;
                lck_spin_unlock(bucket_lock);
        }
        /*
@@ -1275,6 +1458,27 @@ vm_page_remove(
 
        assert(mem->object->resident_page_count > 0);
        mem->object->resident_page_count--;
+
+       if (mem->object->internal) {
+#if DEBUG
+               assert(vm_page_internal_count);
+#endif /* DEBUG */
+
+               OSAddAtomic(-1, &vm_page_internal_count);
+       } else {
+               assert(vm_page_external_count);
+               OSAddAtomic(-1, &vm_page_external_count);
+
+               if (mem->xpmapped) {
+                       assert(vm_page_xpmapped_external_count);
+                       OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+               }
+       }
+       if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) {
+               if (mem->object->resident_page_count == 0)
+                       vm_object_cache_remove(mem->object);
+       }
+
        if (VM_PAGE_WIRED(mem)) {
                assert(mem->object->wired_page_count > 0);
                mem->object->wired_page_count--;
@@ -1294,6 +1498,31 @@ vm_page_remove(
                vm_page_stats_reusable.reused_remove++;
        }
 
+       if (mem->object->purgable == VM_PURGABLE_DENY) {
+               owner = TASK_NULL;
+       } else {
+               owner = mem->object->vo_purgeable_owner;
+       }
+       if (owner &&
+           (mem->object->purgable == VM_PURGABLE_NONVOLATILE ||
+            VM_PAGE_WIRED(mem))) {
+               /* less non-volatile bytes */
+               ledger_debit(owner->ledger,
+                            task_ledgers.purgeable_nonvolatile,
+                            PAGE_SIZE);
+               /* less footprint */
+               ledger_debit(owner->ledger,
+                            task_ledgers.phys_footprint,
+                            PAGE_SIZE);
+       } else if (owner &&
+                  (mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                   mem->object->purgable == VM_PURGABLE_EMPTY)) {
+               assert(! VM_PAGE_WIRED(mem));
+               /* less volatile bytes */
+               ledger_debit(owner->ledger,
+                            task_ledgers.purgeable_volatile,
+                            PAGE_SIZE);
+       }
        if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
                if (VM_PAGE_WIRED(mem)) {
                        assert(vm_page_purgeable_wired_count > 0);
@@ -1303,6 +1532,9 @@ vm_page_remove(
                        OSAddAtomic(-1, &vm_page_purgeable_count);
                }
        }
+       if (mem->object->set_cache_attr == TRUE)
+               pmap_set_cache_attributes(mem->phys_page, 0);
+
        mem->tabled = FALSE;
        mem->object = VM_OBJECT_NULL;
        mem->offset = (vm_object_offset_t) -1;
@@ -1390,7 +1622,7 @@ vm_page_lookup(
         * at outside the scope of the hash bucket lock... this is a 
         * really cheap optimization to avoid taking the lock
         */
-       if (bucket->pages == VM_PAGE_NULL) {
+       if (!bucket->page_list) {
                vm_page_lookup_bucket_NULL++;
 
                return (VM_PAGE_NULL);
@@ -1399,8 +1631,14 @@ vm_page_lookup(
 
        lck_spin_lock(bucket_lock);
 
-       for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
+       for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = VM_PAGE_UNPACK_PTR(mem->next_m)) {
+#if 0
+               /*
+                * we don't hold the page queue lock
+                * so this check isn't safe to make
+                */
                VM_PAGE_CHECK(mem);
+#endif
                if ((mem->object == object) && (mem->offset == offset))
                        break;
        }
@@ -1434,6 +1672,8 @@ vm_page_rename(
        vm_object_offset_t              new_offset,
        boolean_t                       encrypted_ok)
 {
+       boolean_t       internal_to_external, external_to_internal;
+
        assert(mem->object != new_object);
 
        /*
@@ -1464,8 +1704,38 @@ vm_page_rename(
         */
        vm_page_lockspin_queues();
 
+       internal_to_external = FALSE;
+       external_to_internal = FALSE;
+
+       if (mem->local) {
+               /*
+                * it's much easier to get the vm_page_pageable_xxx accounting correct
+                * if we first move the page to the active queue... it's going to end
+                * up there anyway, and we don't call vm_page_rename() frequently enough
+                * for this to matter.
+                */
+               VM_PAGE_QUEUES_REMOVE(mem);
+               vm_page_activate(mem);
+       }
+       if (mem->active || mem->inactive || mem->speculative) {
+               if (mem->object->internal && !new_object->internal) {
+                       internal_to_external = TRUE;
+               }
+               if (!mem->object->internal && new_object->internal) {
+                       external_to_internal = TRUE;
+               }
+       }
+
        vm_page_remove(mem, TRUE);
-       vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE);
+       vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE, FALSE);
+
+       if (internal_to_external) {
+               vm_page_pageable_internal_count--;
+               vm_page_pageable_external_count++;
+       } else if (external_to_internal) {
+               vm_page_pageable_external_count--;
+               vm_page_pageable_internal_count++;
+       }
 
        vm_page_unlock_queues();
 }
@@ -1480,11 +1750,40 @@ vm_page_rename(
 void
 vm_page_init(
        vm_page_t       mem,
-       ppnum_t phys_page)
+       ppnum_t         phys_page,
+       boolean_t       lopage)
 {
        assert(phys_page);
+
+#if    DEBUG
+       if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
+               if (!(pmap_valid_page(phys_page))) {
+                       panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page);
+               }
+       }
+#endif
        *mem = vm_page_template;
        mem->phys_page = phys_page;
+#if 0
+       /*
+        * we're leaving this turned off for now... currently pages
+        * come off the free list and are either immediately dirtied/referenced
+        * due to zero-fill or COW faults, or are used to read or write files...
+        * in the file I/O case, the UPL mechanism takes care of clearing
+        * the state of the HW ref/mod bits in a somewhat fragile way.
+        * Since we may change the way this works in the future (to toughen it up),
+        * I'm leaving this as a reminder of where these bits could get cleared
+        */
+
+       /*
+        * make sure both the h/w referenced and modified bits are
+        * clear at this point... we are especially dependent on 
+        * not finding a 'stale' h/w modified in a number of spots
+        * once this page goes back into use
+        */
+       pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+#endif
+       mem->lopage = lopage;
 }
 
 /*
@@ -1494,24 +1793,25 @@ vm_page_init(
  *     Returns VM_PAGE_NULL if there are no free pages.
  */
 int    c_vm_page_grab_fictitious = 0;
+int    c_vm_page_grab_fictitious_failed = 0;
 int    c_vm_page_release_fictitious = 0;
 int    c_vm_page_more_fictitious = 0;
 
-extern vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr);
-
 vm_page_t
 vm_page_grab_fictitious_common(
        ppnum_t phys_addr)
 {
-       register vm_page_t m;
+       vm_page_t       m;
 
-       m = (vm_page_t)zget(vm_page_zone);
-       if (m) {
-               vm_page_init(m, phys_addr);
+       if ((m = (vm_page_t)zget(vm_page_zone))) {
+
+               vm_page_init(m, phys_addr, FALSE);
                m->fictitious = TRUE;
-       }
 
-       c_vm_page_grab_fictitious++;
+               c_vm_page_grab_fictitious++;
+       } else
+               c_vm_page_grab_fictitious_failed++;
+
        return m;
 }
 
@@ -1527,35 +1827,30 @@ vm_page_grab_guard(void)
        return vm_page_grab_fictitious_common(vm_page_guard_addr);
 }
 
+
 /*
  *     vm_page_release_fictitious:
  *
- *     Release a fictitious page to the free list.
+ *     Release a fictitious page to the zone pool
  */
-
 void
 vm_page_release_fictitious(
-       register vm_page_t m)
+       vm_page_t m)
 {
        assert(!m->free);
-       assert(m->busy);
        assert(m->fictitious);
        assert(m->phys_page == vm_page_fictitious_addr ||
               m->phys_page == vm_page_guard_addr);
 
        c_vm_page_release_fictitious++;
-#if DEBUG
-       if (m->free)
-               panic("vm_page_release_fictitious");
-#endif
-       m->free = TRUE;
+
        zfree(vm_page_zone, m);
 }
 
 /*
  *     vm_page_more_fictitious:
  *
- *     Add more fictitious pages to the free list.
+ *     Add more fictitious pages to the zone.
  *     Allowed to block. This routine is way intimate
  *     with the zones code, for several reasons:
  *     1. we need to carve some page structures out of physical
@@ -1569,23 +1864,13 @@ vm_page_release_fictitious(
  *        permanent allocation of a resource.
  *     3. To smooth allocation humps, we allocate single pages
  *        with kernel_memory_allocate(), and cram them into the
- *        zone. This also allows us to initialize the vm_page_t's
- *        on the way into the zone, so that zget() always returns
- *        an initialized structure. The zone free element pointer
- *        and the free page pointer are both the first item in the
- *        vm_page_t.
- *     4. By having the pages in the zone pre-initialized, we need
- *        not keep 2 levels of lists. The garbage collector simply
- *        scans our list, and reduces physical memory usage as it
- *        sees fit.
+ *        zone.
  */
 
 void vm_page_more_fictitious(void)
 {
-       register vm_page_t m;
-       vm_offset_t addr;
-       kern_return_t retval;
-       int i;
+       vm_offset_t     addr;
+       kern_return_t   retval;
 
        c_vm_page_more_fictitious++;
 
@@ -1624,7 +1909,7 @@ void vm_page_more_fictitious(void)
                                        KMA_KOBJECT|KMA_NOPAGEWAIT);
        if (retval != KERN_SUCCESS) { 
                /*
-                * No page was available. Tell the pageout daemon, drop the
+                * No page was available. Drop the
                 * lock to give another thread a chance at it, and
                 * wait for the pageout daemon to make progress.
                 */
@@ -1632,18 +1917,12 @@ void vm_page_more_fictitious(void)
                vm_page_wait(THREAD_UNINT);
                return;
        }
-       /*
-        * Initialize as many vm_page_t's as will fit on this page. This
-        * depends on the zone code disturbing ONLY the first item of
-        * each zone element.
-        */
-       m = (vm_page_t)addr;
-       for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
-               vm_page_init(m, vm_page_fictitious_addr);
-               m->fictitious = TRUE;
-               m++;
-       }
-       zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
+
+       /* Increment zone page count. We account for all memory managed by the zone in z->page_count */
+       OSAddAtomic64(1, &(vm_page_zone->page_count));
+
+       zcram(vm_page_zone, addr, PAGE_SIZE);
+
        lck_mtx_unlock(&vm_page_alloc_lock);
 }
 
@@ -1668,7 +1947,7 @@ vm_pool_low(void)
  * this is an interface to support bring-up of drivers
  * on platforms with physical memory > 4G...
  */
-int            vm_himemory_mode = 0;
+int            vm_himemory_mode = 2;
 
 
 /*
@@ -1676,43 +1955,65 @@ int             vm_himemory_mode = 0;
  * incapable of generating DMAs with more than 32 bits
  * of address on platforms with physical memory > 4G...
  */
-unsigned int   vm_lopage_free_count = 0;
-unsigned int   vm_lopage_max_count = 0;
+unsigned int   vm_lopages_allocated_q = 0;
+unsigned int   vm_lopages_allocated_cpm_success = 0;
+unsigned int   vm_lopages_allocated_cpm_failed = 0;
 queue_head_t   vm_lopage_queue_free;
 
 vm_page_t
 vm_page_grablo(void)
 {
-       register vm_page_t      mem;
-       unsigned int vm_lopage_alloc_count;
+       vm_page_t       mem;
 
-       if (vm_lopage_poolsize == 0)
+       if (vm_lopage_needed == FALSE)
                return (vm_page_grab());
 
        lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
-       if (! queue_empty(&vm_lopage_queue_free)) {
-               queue_remove_first(&vm_lopage_queue_free,
-                                  mem,
-                                  vm_page_t,
-                                  pageq);
-               assert(mem->free);
-               assert(mem->busy);
-               assert(!mem->pmapped);
-               assert(!mem->wpmapped);
+        if ( !queue_empty(&vm_lopage_queue_free)) {
+                queue_remove_first(&vm_lopage_queue_free,
+                                   mem,
+                                   vm_page_t,
+                                   pageq);
+               assert(vm_lopage_free_count);
 
-               mem->pageq.next = NULL;
-               mem->pageq.prev = NULL;
-               mem->free = FALSE;
+                vm_lopage_free_count--;
+               vm_lopages_allocated_q++;
 
-               vm_lopage_free_count--;
-               vm_lopage_alloc_count = (vm_lopage_poolend - vm_lopage_poolstart) - vm_lopage_free_count;
-               if (vm_lopage_alloc_count > vm_lopage_max_count)
-                       vm_lopage_max_count = vm_lopage_alloc_count;
+               if (vm_lopage_free_count < vm_lopage_lowater)
+                       vm_lopage_refill = TRUE;
+
+               lck_mtx_unlock(&vm_page_queue_free_lock);
        } else {
-               mem = VM_PAGE_NULL;
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+
+               if (cpm_allocate(PAGE_SIZE, &mem, atop(0xffffffff), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
+
+                       lck_mtx_lock_spin(&vm_page_queue_free_lock);
+                       vm_lopages_allocated_cpm_failed++;
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+                       return (VM_PAGE_NULL);
+               }
+               mem->busy = TRUE;
+
+               vm_page_lockspin_queues();
+               
+               mem->gobbled = FALSE;
+               vm_page_gobble_count--;
+               vm_page_wire_count--;
+
+               vm_lopages_allocated_cpm_success++;
+               vm_page_unlock_queues();
        }
-       lck_mtx_unlock(&vm_page_queue_free_lock);
+       assert(mem->busy);
+       assert(!mem->free);
+       assert(!mem->pmapped);
+       assert(!mem->wpmapped);
+       assert(!pmap_is_noencrypt(mem->phys_page));
+
+       mem->pageq.next = NULL;
+       mem->pageq.prev = NULL;
 
        return (mem);
 }
@@ -1739,8 +2040,6 @@ vm_page_grablo(void)
  *     request from the per-cpu queue.
  */
 
-#define COLOR_GROUPS_TO_STEAL  4
-
 
 vm_page_t
 vm_page_grab( void )
@@ -1754,9 +2053,9 @@ vm_page_grab( void )
 return_page_from_cpu_list:
                PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
                PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
-               mem->pageq.next = NULL;
 
                enable_preemption();
+               mem->pageq.next = NULL;
 
                assert(mem->listq.next == NULL && mem->listq.prev == NULL);
                assert(mem->tabled == FALSE);
@@ -1768,6 +2067,11 @@ return_page_from_cpu_list:
                assert(!mem->encrypted);
                assert(!mem->pmapped);
                assert(!mem->wpmapped);
+               assert(!mem->active);
+               assert(!mem->inactive);
+               assert(!mem->throttled);
+               assert(!mem->speculative);
+               assert(!pmap_is_noencrypt(mem->phys_page));
 
                return mem;
        }
@@ -1778,19 +2082,18 @@ return_page_from_cpu_list:
         *      Optionally produce warnings if the wire or gobble
         *      counts exceed some threshold.
         */
-       if (vm_page_wire_count_warning > 0
-           && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+       if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
                printf("mk: vm_page_grab(): high wired page count of %d\n",
                        vm_page_wire_count);
-               assert(vm_page_wire_count < vm_page_wire_count_warning);
        }
-       if (vm_page_gobble_count_warning > 0
-           && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+       if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
                printf("mk: vm_page_grab(): high gobbled page count of %d\n",
                        vm_page_gobble_count);
-               assert(vm_page_gobble_count < vm_page_gobble_count_warning);
        }
-
+#endif
        lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
        /*
@@ -1835,17 +2138,17 @@ return_page_from_cpu_list:
                if (vm_page_free_count <= vm_page_free_reserved)
                        pages_to_steal = 1;
                else {
-                       pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors;
-               
-                       if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved))
+                       if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+                               pages_to_steal = vm_free_magazine_refill_limit;
+                       else
                                pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
                }
                color = PROCESSOR_DATA(current_processor(), start_color);
                head = tail = NULL;
 
+               vm_page_free_count -= pages_to_steal;
+
                while (pages_to_steal--) {
-                       if (--vm_page_free_count < vm_page_free_count_minimum)
-                               vm_page_free_count_minimum = vm_page_free_count;
 
                        while (queue_empty(&vm_page_queue_free[color]))
                                color = (color + 1) & vm_color_mask;
@@ -1857,6 +2160,11 @@ return_page_from_cpu_list:
                        mem->pageq.next = NULL;
                        mem->pageq.prev = NULL;
 
+                       assert(!mem->active);
+                       assert(!mem->inactive);
+                       assert(!mem->throttled);
+                       assert(!mem->speculative);                      
+
                        color = (color + 1) & vm_color_mask;
 
                        if (head == NULL)
@@ -1865,7 +2173,6 @@ return_page_from_cpu_list:
                                tail->pageq.next = (queue_t)mem;
                        tail = mem;
 
-                       mem->pageq.prev = NULL;
                        assert(mem->listq.next == NULL && mem->listq.prev == NULL);
                        assert(mem->tabled == FALSE);
                        assert(mem->object == VM_OBJECT_NULL);
@@ -1879,7 +2186,10 @@ return_page_from_cpu_list:
                        assert(!mem->encrypted);
                        assert(!mem->pmapped);
                        assert(!mem->wpmapped);
+                       assert(!pmap_is_noencrypt(mem->phys_page));
                }
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+
                PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
                PROCESSOR_DATA(current_processor(), start_color) = color;
 
@@ -1890,8 +2200,6 @@ return_page_from_cpu_list:
                mem = head;
                mem->pageq.next = NULL;
 
-               lck_mtx_unlock(&vm_page_queue_free_lock);
-
                enable_preemption();
        }
        /*
@@ -1905,29 +2213,12 @@ return_page_from_cpu_list:
         *      it doesn't really matter.
         */
        if ((vm_page_free_count < vm_page_free_min) ||
-           ((vm_page_free_count < vm_page_free_target) &&
-            ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
-               thread_wakeup((event_t) &vm_page_free_wanted);
-
-#if CONFIG_EMBEDDED
-       {
-       int     percent_avail;
-
-       /*
-        * Decide if we need to poke the memorystatus notification thread.
-        */
-       percent_avail = 
-               (vm_page_active_count + vm_page_inactive_count + 
-                vm_page_speculative_count + vm_page_free_count +
-                (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-               atop_64(max_mem);
-       if (percent_avail <= (kern_memorystatus_level - 5)) {
-               kern_memorystatus_level = percent_avail;
-               thread_wakeup((event_t)&kern_memorystatus_wakeup);
-       }
-       }
-#endif
+            ((vm_page_free_count < vm_page_free_target) &&
+             ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+                thread_wakeup((event_t) &vm_page_free_wanted);
 
+       VM_CHECK_MEMORYSTATUS;
+       
 //     dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);      /* (TEST/DEBUG) */
 
        return mem;
@@ -1946,29 +2237,21 @@ vm_page_release(
        unsigned int    color;
        int     need_wakeup = 0;
        int     need_priv_wakeup = 0;
-#if 0
-       unsigned int pindex;
-       phys_entry *physent;
 
-       physent = mapping_phys_lookup(mem->phys_page, &pindex);         /* (BRINGUP) */
-       if(physent->ppLink & ppN) {                                                                                     /* (BRINGUP) */
-               panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
-       }
-       physent->ppLink = physent->ppLink | ppN;                                                        /* (BRINGUP) */
-#endif
+
        assert(!mem->private && !mem->fictitious);
        if (vm_page_free_verify) {
                assert(pmap_verify_free(mem->phys_page));
        }
 //     dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);      /* (TEST/DEBUG) */
 
+       pmap_clear_noencrypt(mem->phys_page);
 
        lck_mtx_lock_spin(&vm_page_queue_free_lock);
 #if DEBUG
        if (mem->free)
                panic("vm_page_release");
 #endif
-       mem->free = TRUE;
 
        assert(mem->busy);
        assert(!mem->laundry);
@@ -1978,7 +2261,9 @@ vm_page_release(
        assert(mem->listq.next == NULL &&
               mem->listq.prev == NULL);
        
-       if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) {
+       if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
+           vm_lopage_free_count < vm_lopage_free_limit &&
+           mem->phys_page < max_valid_low_ppnum) {
                /*
                 * this exists to support hardware controllers
                 * incapable of generating DMAs with more than 32 bits
@@ -1989,7 +2274,15 @@ vm_page_release(
                                  vm_page_t,
                                  pageq);
                vm_lopage_free_count++;
+
+               if (vm_lopage_free_count >= vm_lopage_free_limit)
+                       vm_lopage_refill = FALSE;
+
+               mem->lopage = TRUE;
        } else {          
+               mem->lopage = FALSE;
+               mem->free = TRUE;
+
                color = mem->phys_page & vm_color_mask;
                queue_enter_first(&vm_page_queue_free[color],
                                  mem,
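
The test introduced above decides whether a freed page refills the low-memory
pool kept for DMA engines that cannot address more than 32 bits. A minimal
sketch of the same predicate as a helper; the helper name is hypothetical,
the variables are the ones used in this hunk:

    /* Hypothetical helper mirroring the three-part refill test above. */
    static boolean_t
    page_refills_lopage_pool(vm_page_t mem)
    {
            return ((mem->lopage == TRUE || vm_lopage_refill == TRUE) && /* from the pool, or refilling */
                    vm_lopage_free_count < vm_lopage_free_limit &&       /* pool not yet at its limit */
                    mem->phys_page < max_valid_low_ppnum);               /* low enough for 32-bit DMA */
    }
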
@@ -2033,25 +2326,33 @@ vm_page_release(
        else if (need_wakeup)
                thread_wakeup_one((event_t) &vm_page_free_count);
 
-#if CONFIG_EMBEDDED
-       {
-       int     percent_avail;
+       VM_CHECK_MEMORYSTATUS;
+}
 
-       /*
-        * Decide if we need to poke the memorystatus notification thread.
-        * Locking is not a big issue, as only a single thread delivers these.
-        */
-       percent_avail = 
-               (vm_page_active_count + vm_page_inactive_count + 
-                vm_page_speculative_count + vm_page_free_count +
-                (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count)  ) * 100 /
-               atop_64(max_mem);
-       if (percent_avail >= (kern_memorystatus_level + 5)) {
-               kern_memorystatus_level = percent_avail;
-               thread_wakeup((event_t)&kern_memorystatus_wakeup);
-       }
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released
+ * for the first time. Hence, no locking or unnecessary checks are made.
+ * Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
+ */
+void
+vm_page_release_startup(
+       register vm_page_t      mem)
+{
+       queue_t queue_free;
+
+       if (vm_lopage_free_count < vm_lopage_free_limit &&
+           mem->phys_page < max_valid_low_ppnum) {
+               mem->lopage = TRUE;
+               vm_lopage_free_count++;
+               queue_free = &vm_lopage_queue_free;
+       } else {          
+               mem->lopage = FALSE;
+               mem->free = TRUE;
+               vm_page_free_count++;
+               queue_free = &vm_page_queue_free[mem->phys_page & vm_color_mask];
        }
-#endif
+       queue_enter_first(queue_free, mem, vm_page_t, pageq);
 }
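
Because vm_page_release_startup() takes no locks, it is only safe before the
first additional thread exists. A sketch of the kind of bootstrap loop that
would use it; the loop shape is an assumption, since the actual caller
(during vm_page_startup()/pmap bring-up) is outside this diff:

    /* Illustrative only: single-threaded bootstrap, no free-queue lock. */
    static void
    release_initial_pages_sketch(vm_page_t pages, unsigned int npages)
    {
            unsigned int    i;

            for (i = 0; i < npages; i++)
                    vm_page_release_startup(&pages[i]);

            /* per the header comment, the caller runs the memorystatus check */
            VM_CHECK_MEMORYSTATUS;
    }
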
 
 /*
@@ -2103,8 +2404,12 @@ vm_page_wait(
                if (need_wakeup)
                        thread_wakeup((event_t)&vm_page_free_wanted);
 
-               if (wait_result == THREAD_WAITING)
+               if (wait_result == THREAD_WAITING) {
+                       VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
+                                      vm_page_free_wanted_privileged, vm_page_free_wanted, 0, 0);
                        wait_result = thread_block(THREAD_CONTINUE_NULL);
+                       VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
+               }
 
                return(wait_result == THREAD_AWAKENED);
        } else {
@@ -2186,16 +2491,16 @@ vm_page_alloc_guard(
 counter(unsigned int c_laundry_pages_freed = 0;)
 
 /*
- *     vm_page_free:
+ *     vm_page_free_prepare:
  *
- *     Returns the given page to the free list,
- *     disassociating it with any VM object.
+ *     Removes page from any queue it may be on
+ *     and disassociates it from its VM object.
  *
  *     Object and page queues must be locked prior to entry.
  */
 static void
 vm_page_free_prepare(
-       register vm_page_t      mem)
+       vm_page_t       mem)
 {
        vm_page_free_prepare_queues(mem);
        vm_page_free_prepare_object(mem, TRUE);
@@ -2209,27 +2514,27 @@ vm_page_free_prepare_queues(
        VM_PAGE_CHECK(mem);
        assert(!mem->free);
        assert(!mem->cleaning);
-       assert(!mem->pageout);
-#if DEBUG
+
+#if MACH_ASSERT || DEBUG
        lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        if (mem->free)
                panic("vm_page_free: freeing page on free list\n");
-#endif
+#endif /* MACH_ASSERT || DEBUG */
        if (mem->object) {
                vm_object_lock_assert_exclusive(mem->object);
        }
-
        if (mem->laundry) {
                /*
                 * We may have to free a page while it's being laundered
                 * if we lost its pager (due to a forced unmount, for example).
-                * We need to call vm_pageout_throttle_up() before removing
-                * the page from its VM object, so that we can find out on
-                * which pageout queue the page is on.
+                * We need to call vm_pageout_steal_laundry() before removing
+                * the page from its VM object, so that we can remove it
+                * from its pageout queue and adjust the laundry accounting.
                 */
-               vm_pageout_throttle_up(mem);
+               vm_pageout_steal_laundry(mem, TRUE);
                counter(++c_laundry_pages_freed);
        }
+       
        VM_PAGE_QUEUES_REMOVE(mem);     /* clears local/active/inactive/throttled/speculative */
 
        if (VM_PAGE_WIRED(mem)) {
@@ -2238,6 +2543,36 @@ vm_page_free_prepare_queues(
                        mem->object->wired_page_count--;
                        assert(mem->object->resident_page_count >=
                               mem->object->wired_page_count);
+
+                       if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+                               OSAddAtomic(+1, &vm_page_purgeable_count);
+                               assert(vm_page_purgeable_wired_count > 0);
+                               OSAddAtomic(-1, &vm_page_purgeable_wired_count);
+                       }
+                       if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                            mem->object->purgable == VM_PURGABLE_EMPTY) &&
+                           mem->object->vo_purgeable_owner != TASK_NULL) {
+                               task_t owner;
+
+                               owner = mem->object->vo_purgeable_owner;
+                               /*
+                                * While wired, this page was accounted
+                                * as "non-volatile" but it should now
+                                * be accounted as "volatile".
+                                */
+                               /* one less "non-volatile"... */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.purgeable_nonvolatile,
+                                            PAGE_SIZE);
+                               /* ... and "phys_footprint" */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.phys_footprint,
+                                            PAGE_SIZE);
+                               /* one more "volatile" */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.purgeable_volatile,
+                                             PAGE_SIZE);
+                       }
                }
                if (!mem->private && !mem->fictitious)
                        vm_page_wire_count--;
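
The ledger calls added above keep per-task purgeable accounting consistent:
when a wired page of a volatile (or empty) object is freed, its bytes stop
being billed as non-volatile (and leave phys_footprint) and are billed as
volatile instead. vm_page_wire() and vm_page_unwire() below perform the
mirror-image transfers. Condensed into an illustrative wrapper:

    /* Illustrative wrapper; PAGE_SIZE bytes move between the two ledgers. */
    static void
    purgeable_page_becomes_volatile(task_t owner)
    {
            ledger_debit(owner->ledger, task_ledgers.purgeable_nonvolatile, PAGE_SIZE);
            ledger_debit(owner->ledger, task_ledgers.phys_footprint, PAGE_SIZE);
            ledger_credit(owner->ledger, task_ledgers.purgeable_volatile, PAGE_SIZE);
    }
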
@@ -2256,10 +2591,6 @@ vm_page_free_prepare_object(
        vm_page_t       mem,
        boolean_t       remove_from_hash)
 {
-       if (mem->object) {
-               vm_object_lock_assert_exclusive(mem->object);
-       }
-
        if (mem->tabled)
                vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
 
@@ -2270,33 +2601,26 @@ vm_page_free_prepare_object(
                mem->fictitious = TRUE;
                mem->phys_page = vm_page_fictitious_addr;
        }
-       if (mem->fictitious) {
-               /* Some of these may be unnecessary */
-               mem->gobbled = FALSE;
-               mem->busy = TRUE;
-               mem->absent = FALSE;
-               mem->error = FALSE;
-               mem->dirty = FALSE;
-               mem->precious = FALSE;
-               mem->reference = FALSE;
-               mem->encrypted = FALSE;
-               mem->encrypted_cleaning = FALSE;
-               mem->pmapped = FALSE;
-               mem->wpmapped = FALSE;
-               mem->reusable = FALSE;
-       } else {
-               if (mem->zero_fill == TRUE)
-                       VM_ZF_COUNT_DECR();
-               vm_page_init(mem, mem->phys_page);
+       if ( !mem->fictitious) {
+               vm_page_init(mem, mem->phys_page, mem->lopage);
        }
 }
 
 
+/*
+ *     vm_page_free:
+ *
+ *     Returns the given page to the free list,
+ *     disassociating it from any VM object.
+ *
+ *     Object and page queues must be locked prior to entry.
+ */
 void
 vm_page_free(
        vm_page_t       mem)
 {
        vm_page_free_prepare(mem);
+
        if (mem->fictitious) {
                vm_page_release_fictitious(mem);
        } else {
@@ -2323,207 +2647,163 @@ vm_page_free_unlocked(
        }
 }
 
+
 /*
  * Free a list of pages.  The list can be up to several hundred pages,
  * as blocked up by vm_pageout_scan().
  * The big win is not having to take the free list lock once
- * per page.  We sort the incoming pages into n lists, one for
- * each color.
+ * per page.
  */
 void
 vm_page_free_list(
-       vm_page_t       mem,
+       vm_page_t       freeq,
        boolean_t       prepare_object)
 {
+        vm_page_t      mem;
         vm_page_t      nxt;
-       int             pg_count = 0;
-       int             color;
-       int             inuse_list_head = -1;
+       vm_page_t       local_freeq;
+       int             pg_count;
 
-       queue_head_t    free_list[MAX_COLORS];
-       int             inuse[MAX_COLORS];
+       while (freeq) {
 
-       for (color = 0; color < (signed) vm_colors; color++) {
-               queue_init(&free_list[color]);
-       }
-       
-       while (mem) {
-               assert(!mem->inactive);
-               assert(!mem->active);
-               assert(!mem->throttled);
-               assert(!mem->free);
-               assert(!mem->speculative);
-               assert(mem->pageq.prev == NULL);
+               pg_count = 0;
+               local_freeq = VM_PAGE_NULL;
+               mem = freeq;
+
+               /*
+                * break up the processing into smaller chunks so
+                * that we can 'pipeline' the pages onto the
+                * free list w/o introducing too much
+                * contention on the global free queue lock
+                */
+               while (mem && pg_count < 64) {
+
+                       assert(!mem->inactive);
+                       assert(!mem->active);
+                       assert(!mem->throttled);
+                       assert(!mem->free);
+                       assert(!mem->speculative);
+                       assert(!VM_PAGE_WIRED(mem));
+                       assert(mem->pageq.prev == NULL);
 
-               nxt = (vm_page_t)(mem->pageq.next);
+                       nxt = (vm_page_t)(mem->pageq.next);
                
-               if (prepare_object == TRUE)
-                       vm_page_free_prepare_object(mem, TRUE);
+                       if (vm_page_free_verify && !mem->fictitious && !mem->private) {
+                               assert(pmap_verify_free(mem->phys_page));
+                       }
+                       if (prepare_object == TRUE)
+                               vm_page_free_prepare_object(mem, TRUE);
 
-               if (vm_page_free_verify && !mem->fictitious && !mem->private) {
-                       assert(pmap_verify_free(mem->phys_page));
-               }
-               assert(mem->busy);
+                       if (!mem->fictitious) {
+                               assert(mem->busy);
 
-               if (!mem->fictitious) {
-                       if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) {
-                               mem->pageq.next = NULL;
-                               vm_page_release(mem);
-                       } else {
+                               if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
+                                   vm_lopage_free_count < vm_lopage_free_limit &&
+                                   mem->phys_page < max_valid_low_ppnum) {
+                                       mem->pageq.next = NULL;
+                                       vm_page_release(mem);
+                               } else {
+                                       /*
+                                        * IMPORTANT: we can't set the page "free" here
+                                        * because that would make the page eligible for
+                                        * a physically-contiguous allocation (see
+                                        * vm_page_find_contiguous()) right away (we don't
+                                        * hold the vm_page_queue_free lock).  That would
+                                        * cause trouble because the page is not actually
+                                        * in the free queue yet...
+                                        */
+                                       mem->pageq.next = (queue_entry_t)local_freeq;
+                                       local_freeq = mem;
+                                       pg_count++;
 
-                       /*
-                        * IMPORTANT: we can't set the page "free" here
-                        * because that would make the page eligible for
-                        * a physically-contiguous allocation (see
-                        * vm_page_find_contiguous()) right away (we don't
-                        * hold the vm_page_queue_free lock).  That would
-                        * cause trouble because the page is not actually
-                        * in the free queue yet...
-                        */
-                               color = mem->phys_page & vm_color_mask;
-                               if (queue_empty(&free_list[color])) {
-                                       inuse[color] = inuse_list_head;
-                                       inuse_list_head = color;
+                                       pmap_clear_noencrypt(mem->phys_page);
                                }
-                               queue_enter_first(&free_list[color],
-                                                 mem,
-                                                 vm_page_t,
-                                                 pageq);
-                               pg_count++;
+                       } else {
+                               assert(mem->phys_page == vm_page_fictitious_addr ||
+                                      mem->phys_page == vm_page_guard_addr);
+                               vm_page_release_fictitious(mem);
                        }
-               } else {
-                       assert(mem->phys_page == vm_page_fictitious_addr ||
-                              mem->phys_page == vm_page_guard_addr);
-                       vm_page_release_fictitious(mem);
+                       mem = nxt;
                }
-               mem = nxt;
-       }
-       if (pg_count) {
-               unsigned int    avail_free_count;
-               unsigned int    need_wakeup = 0;
-               unsigned int    need_priv_wakeup = 0;
+               freeq = mem;
+
+               if ( (mem = local_freeq) ) {
+                       unsigned int    avail_free_count;
+                       unsigned int    need_wakeup = 0;
+                       unsigned int    need_priv_wakeup = 0;
          
-               lck_mtx_lock_spin(&vm_page_queue_free_lock);
+                       lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
-               color = inuse_list_head;
-               
-               while( color != -1 ) {
-                       vm_page_t first, last;
-                       vm_page_t first_free;
+                       while (mem) {
+                               int     color;
+
+                               nxt = (vm_page_t)(mem->pageq.next);
 
-                       /*
-                        * Now that we hold the vm_page_queue_free lock,
-                        * it's safe to mark all pages in our local queue
-                        * as "free"...
-                        */
-                       queue_iterate(&free_list[color],
-                                     mem,
-                                     vm_page_t,
-                                     pageq) {
                                assert(!mem->free);
                                assert(mem->busy);
                                mem->free = TRUE;
-                       }
 
-                       /*
-                        * ... and insert our local queue at the head of
-                        * the global free queue.
-                        */
-                       first = (vm_page_t) queue_first(&free_list[color]);
-                       last = (vm_page_t) queue_last(&free_list[color]);
-                       first_free = (vm_page_t) queue_first(&vm_page_queue_free[color]);
-                       if (queue_empty(&vm_page_queue_free[color])) {
-                               queue_last(&vm_page_queue_free[color]) =
-                                       (queue_entry_t) last;
-                       } else {
-                               queue_prev(&first_free->pageq) =
-                                       (queue_entry_t) last;
-                       }
-                       queue_first(&vm_page_queue_free[color]) =
-                               (queue_entry_t) first;
-                       queue_prev(&first->pageq) =
-                               (queue_entry_t) &vm_page_queue_free[color];
-                       queue_next(&last->pageq) =
-                               (queue_entry_t) first_free;
-
-                       /* next color */
-                       color = inuse[color];
-               }
-               
-               vm_page_free_count += pg_count;
-               avail_free_count = vm_page_free_count;
-
-               if (vm_page_free_wanted_privileged > 0 &&
-                   avail_free_count > 0) {
-                       if (avail_free_count < vm_page_free_wanted_privileged) {
-                               need_priv_wakeup = avail_free_count;
-                               vm_page_free_wanted_privileged -=
-                                       avail_free_count;
-                               avail_free_count = 0;
-                       } else {
-                               need_priv_wakeup = vm_page_free_wanted_privileged;
-                               vm_page_free_wanted_privileged = 0;
-                               avail_free_count -=
-                                       vm_page_free_wanted_privileged;
+                               color = mem->phys_page & vm_color_mask;
+                               queue_enter_first(&vm_page_queue_free[color],
+                                                 mem,
+                                                 vm_page_t,
+                                                 pageq);
+                               mem = nxt;
                        }
-               }
+                       vm_page_free_count += pg_count;
+                       avail_free_count = vm_page_free_count;
 
-               if (vm_page_free_wanted > 0 &&
-                   avail_free_count > vm_page_free_reserved) {
-                       unsigned int  available_pages;
+                       if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
+
+                               if (avail_free_count < vm_page_free_wanted_privileged) {
+                                       need_priv_wakeup = avail_free_count;
+                                       vm_page_free_wanted_privileged -= avail_free_count;
+                                       avail_free_count = 0;
+                               } else {
+                                       need_priv_wakeup = vm_page_free_wanted_privileged;
+                                       vm_page_free_wanted_privileged = 0;
+                                       avail_free_count -= vm_page_free_wanted_privileged;
+                               }
+                       }
+                       if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
+                               unsigned int  available_pages;
 
-                       available_pages = (avail_free_count -
-                                          vm_page_free_reserved);
+                               available_pages = avail_free_count - vm_page_free_reserved;
 
-                       if (available_pages >= vm_page_free_wanted) {
-                               need_wakeup = vm_page_free_wanted;
-                               vm_page_free_wanted = 0;
-                       } else {
-                               need_wakeup = available_pages;
-                               vm_page_free_wanted -= available_pages;
+                               if (available_pages >= vm_page_free_wanted) {
+                                       need_wakeup = vm_page_free_wanted;
+                                       vm_page_free_wanted = 0;
+                               } else {
+                                       need_wakeup = available_pages;
+                                       vm_page_free_wanted -= available_pages;
+                               }
                        }
-               }
-               lck_mtx_unlock(&vm_page_queue_free_lock);
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
 
-               if (need_priv_wakeup != 0) {
-                       /*
-                        * There shouldn't be that many VM-privileged threads,
-                        * so let's wake them all up, even if we don't quite
-                        * have enough pages to satisfy them all.
-                        */
-                       thread_wakeup((event_t)&vm_page_free_wanted_privileged);
-               }
-               if (need_wakeup != 0 && vm_page_free_wanted == 0) {
-                       /*
-                        * We don't expect to have any more waiters
-                        * after this, so let's wake them all up at
-                        * once.
-                        */
-                       thread_wakeup((event_t) &vm_page_free_count);
-               } else for (; need_wakeup != 0; need_wakeup--) {
-                       /*
-                        * Wake up one waiter per page we just released.
-                        */
-                       thread_wakeup_one((event_t) &vm_page_free_count);
-               }
-#if CONFIG_EMBEDDED
-               {
-               int percent_avail;
+                       if (need_priv_wakeup != 0) {
+                               /*
+                                * There shouldn't be that many VM-privileged threads,
+                                * so let's wake them all up, even if we don't quite
+                                * have enough pages to satisfy them all.
+                                */
+                               thread_wakeup((event_t)&vm_page_free_wanted_privileged);
+                       }
+                       if (need_wakeup != 0 && vm_page_free_wanted == 0) {
+                               /*
+                                * We don't expect to have any more waiters
+                                * after this, so let's wake them all up at
+                                * once.
+                                */
+                               thread_wakeup((event_t) &vm_page_free_count);
+                       } else for (; need_wakeup != 0; need_wakeup--) {
+                               /*
+                                * Wake up one waiter per page we just released.
+                                */
+                               thread_wakeup_one((event_t) &vm_page_free_count);
+                       }
 
-               /*
-                * Decide if we need to poke the memorystatus notification thread.
-                */
-               percent_avail = 
-                       (vm_page_active_count + vm_page_inactive_count + 
-                        vm_page_speculative_count + vm_page_free_count +
-                        (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count)  ) * 100 /
-                       atop_64(max_mem);
-               if (percent_avail >= (kern_memorystatus_level + 5)) {
-                       kern_memorystatus_level = percent_avail;
-                       thread_wakeup((event_t)&kern_memorystatus_wakeup);
-               }
+                       VM_CHECK_MEMORYSTATUS;
                }
-#endif
        }
 }
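
The rewritten vm_page_free_list() drops the per-color local queues in favor
of fixed 64-page batches: each batch is chained up with no lock held, then
vm_page_queue_free_lock is taken once to mark the pages free and insert them
(the IMPORTANT comment above explains why "free" must not be set any
earlier). A self-contained sketch of the same pattern in plain C; the types
and the pthread lock are stand-ins for the kernel primitives:

    #include <stddef.h>
    #include <pthread.h>

    struct item {                   /* stand-in for vm_page_t */
            struct item *next;
    };

    static struct item     *shared_free_list;  /* stand-in for vm_page_queue_free */
    static unsigned long    shared_free_count;
    static pthread_mutex_t  shared_lock = PTHREAD_MUTEX_INITIALIZER;

    #define BATCH 64                /* matches the "pg_count < 64" chunking */

    void
    free_in_batches(struct item *head)
    {
            while (head != NULL) {
                    struct item *local = NULL;
                    int n = 0;

                    /* chain up a private batch; no lock held yet */
                    while (head != NULL && n < BATCH) {
                            struct item *nxt = head->next;
                            head->next = local;
                            local = head;
                            head = nxt;
                            n++;
                    }
                    /* one lock round trip per batch, not per page */
                    pthread_mutex_lock(&shared_lock);
                    while (local != NULL) {
                            struct item *nxt = local->next;
                            local->next = shared_free_list; /* only now "free" */
                            shared_free_list = local;
                            local = nxt;
                    }
                    shared_free_count += n;
                    pthread_mutex_unlock(&shared_lock);
            }
    }
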
 
@@ -2562,6 +2842,11 @@ vm_page_wire(
        lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
        if ( !VM_PAGE_WIRED(mem)) {
+
+               if (mem->pageout_queue) {
+                       mem->pageout = FALSE;
+                       vm_pageout_throttle_up(mem);
+               }
                VM_PAGE_QUEUES_REMOVE(mem);
 
                if (mem->object) {
@@ -2573,6 +2858,25 @@ vm_page_wire(
                                OSAddAtomic(-1, &vm_page_purgeable_count);
                                OSAddAtomic(1, &vm_page_purgeable_wired_count);
                        }
+                       if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                            mem->object->purgable == VM_PURGABLE_EMPTY) &&
+                           mem->object->vo_purgeable_owner != TASK_NULL) {
+                               task_t owner;
+
+                               owner = mem->object->vo_purgeable_owner;
+                               /* less volatile bytes */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.purgeable_volatile,
+                                            PAGE_SIZE);
+                               /* more not-quite-volatile bytes */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.purgeable_nonvolatile,
+                                             PAGE_SIZE);
+                               /* more footprint */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.phys_footprint,
+                                             PAGE_SIZE);
+                       }
                        if (mem->object->all_reusable) {
                                /*
                                 * Wired pages are not counted as "re-usable"
@@ -2598,28 +2902,9 @@ vm_page_wire(
                if (mem->gobbled)
                        vm_page_gobble_count--;
                mem->gobbled = FALSE;
-               if (mem->zero_fill == TRUE) {
-                       mem->zero_fill = FALSE;
-                       VM_ZF_COUNT_DECR();
-               }
-#if CONFIG_EMBEDDED
-               {
-               int     percent_avail;
 
-               /*
-                * Decide if we need to poke the memorystatus notification thread.
-                */
-               percent_avail = 
-                       (vm_page_active_count + vm_page_inactive_count + 
-                        vm_page_speculative_count + vm_page_free_count +
-                        (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-                       atop_64(max_mem);
-               if (percent_avail <= (kern_memorystatus_level - 5)) {
-                       kern_memorystatus_level = percent_avail;
-                       thread_wakeup((event_t)&kern_memorystatus_wakeup);
-               }
-               }
-#endif
+               VM_CHECK_MEMORYSTATUS;
+               
                /* 
                 * ENCRYPTED SWAP:
                 * The page could be encrypted, but
@@ -2671,7 +2956,8 @@ vm_page_gobble(
  */
 void
 vm_page_unwire(
-       register vm_page_t      mem)
+       vm_page_t       mem,
+       boolean_t       queueit)
 {
 
 //     dbgLog(current_thread(), mem->offset, mem->object, 0);  /* (TEST/DEBUG) */
@@ -2695,32 +2981,38 @@ vm_page_unwire(
                        assert(vm_page_purgeable_wired_count > 0);
                        OSAddAtomic(-1, &vm_page_purgeable_wired_count);
                }
-               assert(!mem->laundry);
+               if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                    mem->object->purgable == VM_PURGABLE_EMPTY) &&
+                   mem->object->vo_purgeable_owner != TASK_NULL) {
+                       task_t owner;
+
+                       owner = mem->object->vo_purgeable_owner;
+                       /* more volatile bytes */
+                       ledger_credit(owner->ledger,
+                                     task_ledgers.purgeable_volatile,
+                                     PAGE_SIZE);
+                       /* less not-quite-volatile bytes */
+                       ledger_debit(owner->ledger,
+                                    task_ledgers.purgeable_nonvolatile,
+                                    PAGE_SIZE);
+                       /* less footprint */
+                       ledger_debit(owner->ledger,
+                                    task_ledgers.phys_footprint,
+                                    PAGE_SIZE);
+               }
                assert(mem->object != kernel_object);
                assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
-               if (mem->object->purgable == VM_PURGABLE_EMPTY) {
-                       vm_page_deactivate(mem);
-               } else {
-                       vm_page_activate(mem);
-               }
-#if CONFIG_EMBEDDED
-               {
-               int     percent_avail;
 
-               /*
-                * Decide if we need to poke the memorystatus notification thread.
-                */
-               percent_avail = 
-                       (vm_page_active_count + vm_page_inactive_count + 
-                        vm_page_speculative_count + vm_page_free_count +
-                        (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-                       atop_64(max_mem);
-               if (percent_avail >= (kern_memorystatus_level + 5)) {
-                       kern_memorystatus_level = percent_avail;
-                       thread_wakeup((event_t)&kern_memorystatus_wakeup);
-               }
+               if (queueit == TRUE) {
+                       if (mem->object->purgable == VM_PURGABLE_EMPTY) {
+                               vm_page_deactivate(mem);
+                       } else {
+                               vm_page_activate(mem);
+                       }
                }
-#endif
+
+               VM_CHECK_MEMORYSTATUS;
+               
        }
        VM_PAGE_CHECK(mem);
 }
@@ -2763,6 +3055,8 @@ vm_page_deactivate_internal(
         *      inactive queue.  Note wired pages should not have
         *      their reference bit cleared.
         */
+       assert ( !(m->absent && !m->unusual));
+
        if (m->gobbled) {               /* can this happen? */
                assert( !VM_PAGE_WIRED(m));
 
@@ -2771,10 +3065,18 @@ vm_page_deactivate_internal(
                vm_page_gobble_count--;
                m->gobbled = FALSE;
        }
-       if (m->private || (VM_PAGE_WIRED(m)))
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
                return;
 
-       if (!m->fictitious && !m->absent && clear_hw_reference == TRUE)
+       if (!m->absent && clear_hw_reference == TRUE)
                pmap_clear_reference(m->phys_page);
 
        m->reference = FALSE;
@@ -2783,10 +3085,7 @@ vm_page_deactivate_internal(
        if (!m->inactive) {
                VM_PAGE_QUEUES_REMOVE(m);
 
-               assert(!m->laundry);
-               assert(m->pageq.next == NULL && m->pageq.prev == NULL);
-
-               if (!IP_VALID(memory_manager_default) &&
+               if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
                    m->dirty && m->object->internal &&
                    (m->object->purgable == VM_PURGABLE_DENY ||
                     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
@@ -2795,29 +3094,71 @@ vm_page_deactivate_internal(
                        m->throttled = TRUE;
                        vm_page_throttled_count++;
                } else {
-                       if (!m->fictitious && m->object->named && m->object->ref_count == 1) {
+                       if (m->object->named && m->object->ref_count == 1) {
                                vm_page_speculate(m, FALSE);
 #if DEVELOPMENT || DEBUG
                                vm_page_speculative_recreated++;
 #endif
-                               return;
                        } else {
-                               if (m->zero_fill) {
-                                       queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
-                                       vm_zf_queue_count++;
-                               } else {
-                                       queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
-                               }
-                       }
-                       m->inactive = TRUE;
-                       if (!m->fictitious) {
-                               vm_page_inactive_count++;
-                               token_new_pagecount++;
+                               VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
                        }
                }
        }
 }
 
+/*
+ * vm_page_enqueue_cleaned
+ *
+ * Put the page on the cleaned queue, mark it cleaned, etc.
+ * Being on the cleaned queue (and having m->clean_queue set)
+ * does ** NOT ** guarantee that the page is clean!
+ *
+ * Call with the queues lock held.
+ */
+
+void vm_page_enqueue_cleaned(vm_page_t m)
+{
+       assert(m->phys_page != vm_page_guard_addr);
+#if DEBUG
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       assert( !(m->absent && !m->unusual));
+
+       if (m->gobbled) {
+               assert( !VM_PAGE_WIRED(m));
+               if (!m->private && !m->fictitious)
+                       vm_page_wire_count--;
+               vm_page_gobble_count--;
+               m->gobbled = FALSE;
+       }
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->clean_queue || m->pageout_queue || m->private || m->fictitious)
+               return;
+
+       VM_PAGE_QUEUES_REMOVE(m);
+
+       queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
+       m->clean_queue = TRUE;
+       vm_page_cleaned_count++;
+
+       m->inactive = TRUE;
+       vm_page_inactive_count++;
+       if (m->object->internal) {
+               vm_page_pageable_internal_count++;
+       } else {
+               vm_page_pageable_external_count++;
+       }
+
+       vm_pageout_enqueued_cleaned++;
+}
+
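
A sketch of the expected call pattern for vm_page_enqueue_cleaned(); the
wrapper is hypothetical, but the lock discipline follows the header comment
above:

    /* Illustrative caller: the queues lock must be held across the call. */
    static void
    enqueue_cleaned_example(vm_page_t m)
    {
            vm_page_lock_queues();
            /* silently bails out for laundry/pageout-queue/private/fictitious pages */
            vm_page_enqueue_cleaned(m);
            vm_page_unlock_queues();
    }
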
 /*
  *     vm_page_activate:
  *
@@ -2838,6 +3179,8 @@ vm_page_activate(
 #if DEBUG
        lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
+       assert( !(m->absent && !m->unusual));
+
        if (m->gobbled) {
                assert( !VM_PAGE_WIRED(m));
                if (!m->private && !m->fictitious)
@@ -2845,7 +3188,15 @@ vm_page_activate(
                vm_page_gobble_count--;
                m->gobbled = FALSE;
        }
-       if (m->private)
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
                return;
 
 #if DEBUG
@@ -2857,14 +3208,13 @@ vm_page_activate(
                DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
                DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
        }
-
+       
        VM_PAGE_QUEUES_REMOVE(m);
 
        if ( !VM_PAGE_WIRED(m)) {
-               assert(!m->laundry);
-               assert(m->pageq.next == NULL && m->pageq.prev == NULL);
-               if (!IP_VALID(memory_manager_default) && 
-                   !m->fictitious && m->dirty && m->object->internal && 
+
+               if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && 
+                   m->dirty && m->object->internal && 
                    (m->object->purgable == VM_PURGABLE_DENY ||
                     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
                     m->object->purgable == VM_PURGABLE_VOLATILE)) {
@@ -2874,8 +3224,12 @@ vm_page_activate(
                } else {
                        queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
                        m->active = TRUE;
-                       if (!m->fictitious)
-                               vm_page_active_count++;
+                       vm_page_active_count++;
+                       if (m->object->internal) {
+                               vm_page_pageable_internal_count++;
+                       } else {
+                               vm_page_pageable_external_count++;
+                       }
                }
                m->reference = TRUE;
                m->no_cache = FALSE;
@@ -2904,6 +3258,18 @@ vm_page_speculate(
 #if DEBUG
        lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
+       assert( !(m->absent && !m->unusual));
+
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
+               return;
 
        VM_PAGE_QUEUES_REMOVE(m);               
 
@@ -2926,8 +3292,8 @@ vm_page_speculate(
                        /*
                         * set the timer to begin a new group
                         */
-                       aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000;
-                       aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC;
+                       aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+                       aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
 
                        ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
                } else {
@@ -2950,8 +3316,8 @@ vm_page_speculate(
                                if (!queue_empty(&aq->age_q))
                                        vm_page_speculate_ageit(aq);
 
-                               aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000;
-                               aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC;
+                               aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+                               aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
 
                                ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
                        }
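
The hard-coded VM_PAGE_SPECULATIVE_Q_AGE_MS becomes the runtime-tunable
vm_page_speculative_q_age_ms in this hunk. A worked example of the
millisecond split, assuming a value of 500:

    /*
     * vm_page_speculative_q_age_ms = 500:
     *   age_ts.tv_sec  = 500 / 1000                          = 0
     *   age_ts.tv_nsec = (500 % 1000) * 1000 * NSEC_PER_USEC
     *                  = 500 * 1000 * 1000                   = 500,000,000 ns
     * i.e. each speculative age group spans half a second before
     * ADD_MACH_TIMESPEC() offsets it from the current time.
     */
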
@@ -2959,8 +3325,15 @@ vm_page_speculate(
                enqueue_tail(&aq->age_q, &m->pageq);
                m->speculative = TRUE;
                vm_page_speculative_count++;
+               if (m->object->internal) {
+                       vm_page_pageable_internal_count++;
+               } else {
+                       vm_page_pageable_external_count++;
+               }
 
                if (new == TRUE) {
+                       vm_object_lock_assert_exclusive(m->object);
+
                        m->object->pages_created++;
 #if DEVELOPMENT || DEBUG
                        vm_page_speculative_created++;
@@ -3021,24 +3394,22 @@ vm_page_lru(
 #if DEBUG
        lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
-       if (m->active || m->reference)
-               return;
-
-       if (m->private || (VM_PAGE_WIRED(m)))
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
                return;
 
        m->no_cache = FALSE;
 
        VM_PAGE_QUEUES_REMOVE(m);
 
-       assert(!m->laundry);
-       assert(m->pageq.next == NULL && m->pageq.prev == NULL);
-
-       queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
-       m->inactive = TRUE;
-
-        vm_page_inactive_count++;
-       token_new_pagecount++;
+       VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
 }
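
Both vm_page_deactivate_internal() and vm_page_lru() now funnel through
VM_PAGE_ENQUEUE_INACTIVE(). Its real definition lives in the header and is
not part of this diff; pieced together from the code it replaces here and
the counters maintained in vm_page_enqueue_cleaned() above, it presumably
expands to something like:

    /* Assumed shape only -- the real macro is defined in vm_page.h. */
    #define VM_PAGE_ENQUEUE_INACTIVE_SKETCH(m, first)                       \
            do {                                                            \
                    if (first)                                              \
                            queue_enter_first(&vm_page_queue_inactive,      \
                                              m, vm_page_t, pageq);         \
                    else                                                    \
                            queue_enter(&vm_page_queue_inactive,            \
                                        m, vm_page_t, pageq);               \
                    (m)->inactive = TRUE;                                   \
                    vm_page_inactive_count++;                               \
                    if ((m)->object->internal)                              \
                            vm_page_pageable_internal_count++;              \
                    else                                                    \
                            vm_page_pageable_external_count++;              \
                    token_new_pagecount++;                                  \
            } while (0)
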
 
 
@@ -3049,8 +3420,14 @@ vm_page_reactivate_all_throttled(void)
        vm_page_t       first_active;
        vm_page_t       m;
        int             extra_active_count;
+       int             extra_internal_count, extra_external_count;
+
+       if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default))
+               return;
 
        extra_active_count = 0;
+       extra_internal_count = 0;
+       extra_external_count = 0;
        vm_page_lock_queues();
        if (! queue_empty(&vm_page_queue_throttled)) {
                /*
@@ -3063,9 +3440,14 @@ vm_page_reactivate_all_throttled(void)
                        assert(!m->inactive);
                        assert(!m->speculative);
                        assert(!VM_PAGE_WIRED(m));
-                       if (!m->fictitious) {
-                               extra_active_count++;
+
+                       extra_active_count++;
+                       if (m->object->internal) {
+                               extra_internal_count++;
+                       } else {
+                               extra_external_count++;
                        }
+
                        m->throttled = FALSE;
                        m->active = TRUE;
                        VM_PAGE_CHECK(m);
@@ -3097,6 +3479,8 @@ vm_page_reactivate_all_throttled(void)
                 * Adjust the global page counts.
                 */
                vm_page_active_count += extra_active_count;
+               vm_page_pageable_internal_count += extra_internal_count;
+               vm_page_pageable_external_count += extra_external_count;
                vm_page_throttled_count = 0;
        }
        assert(vm_page_throttled_count == 0);
@@ -3185,7 +3569,11 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
                 * Adjust the global page counts.
                 */
                vm_page_active_count += lq->vpl_count;
+               vm_page_pageable_internal_count += lq->vpl_internal_count;
+               vm_page_pageable_external_count += lq->vpl_external_count;
                lq->vpl_count = 0;
+               lq->vpl_internal_count = 0;
+               lq->vpl_external_count = 0;
        }
        assert(queue_empty(&lq->vpl_queue));
 
@@ -3200,18 +3588,26 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
  *
  *     Zero-fill a part of the page.
  */
+#define PMAP_ZERO_PART_PAGE_IMPLEMENTED
 void
 vm_page_part_zero_fill(
        vm_page_t       m,
        vm_offset_t     m_pa,
        vm_size_t       len)
 {
-       vm_page_t       tmp;
 
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(m);
+#endif
+
 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
        pmap_zero_part_page(m->phys_page, m_pa, len);
 #else
+       vm_page_t       tmp;
        while (1) {
                        tmp = vm_page_grab();
                if (tmp == VM_PAGE_NULL) {
@@ -3246,8 +3642,13 @@ vm_page_zero_fill(
         XPR(XPR_VM_PAGE,
                 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
                 m->object, m->offset, m, 0,0);
-
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(m);
+#endif
 
 //     dbgTrace(0xAEAEAEAE, m->phys_page, 0);          /* (BRINGUP) */
        pmap_zero_page(m->phys_page);
@@ -3267,9 +3668,14 @@ vm_page_part_copy(
        vm_offset_t     dst_pa,
        vm_size_t       len)
 {
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dst_m);
-
+#endif
        pmap_copy_part_page(src_m->phys_page, src_pa,
                        dst_m->phys_page, dst_pa, len);
 }
@@ -3297,9 +3703,15 @@ vm_page_copy(
         src_m->object, src_m->offset, 
        dest_m->object, dest_m->offset,
        0);
-
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dest_m);
+#endif
+       vm_object_lock_assert_held(src_m->object);
 
        /*
         * ENCRYPTED SWAP:
@@ -3323,6 +3735,17 @@ vm_page_copy(
                vm_page_copy_cs_validations++;
                vm_page_validate_cs(src_m);
        }
+
+       if (vm_page_is_slideable(src_m)) {
+               boolean_t was_busy = src_m->busy;
+               src_m->busy = TRUE;
+               (void) vm_page_slide(src_m, 0);
+               assert(src_m->busy);
+               if (!was_busy) {
+                       PAGE_WAKEUP_DONE(src_m);
+               }
+       }
+
        /*
         * Propagate the cs_tainted bit to the copy page. Do not propagate
         * the cs_validated bit.
@@ -3331,7 +3754,8 @@ vm_page_copy(
        if (dest_m->cs_tainted) {
                vm_page_copy_cs_tainted++;
        }
-
+       dest_m->slid = src_m->slid;
+       dest_m->error = src_m->error; /* sliding src_m might have failed... */
        pmap_copy_page(src_m->phys_page, dest_m->phys_page);
 }
 
@@ -3343,7 +3767,7 @@ _vm_page_print(
        printf("vm_page %p: \n", p);
        printf("  pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
        printf("  listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
-       printf("  next=%p\n", p->next);
+       printf("  next=%p\n", VM_PAGE_UNPACK_PTR(p->next_m));
        printf("  object=%p offset=0x%llx\n", p->object, p->offset);
        printf("  wire_count=%u\n", p->wire_count);
 
@@ -3381,14 +3805,10 @@ _vm_page_print(
               (p->unusual ? "" : "!"),
               (p->encrypted ? "" : "!"),
               (p->encrypted_cleaning ? "" : "!"));
-       printf("  %slist_req_pending, %sdump_cleaning, %scs_validated, %scs_tainted, %sno_cache\n",
-              (p->list_req_pending ? "" : "!"),
-              (p->dump_cleaning ? "" : "!"),
+       printf("  %scs_validated, %scs_tainted, %sno_cache\n",
               (p->cs_validated ? "" : "!"),
               (p->cs_tainted ? "" : "!"),
               (p->no_cache ? "" : "!"));
-       printf("  %szero_fill\n",
-              (p->zero_fill ? "" : "!"));
 
        printf("phys_page=0x%x\n", p->phys_page);
 }
@@ -3412,7 +3832,7 @@ vm_page_verify_contiguous(
                if (m->phys_page != prev_addr + 1) {
                        printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
                               m, (long)prev_addr, m->phys_page);
-                       printf("pages %p page_count %d\n", pages, page_count);
+                       printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
                        panic("vm_page_verify_contiguous:  not contiguous!");
                }
                prev_addr = m->phys_page;
@@ -3430,6 +3850,7 @@ vm_page_verify_contiguous(
 /*
  *     Check the free lists for proper length etc.
  */
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
 static unsigned int
 vm_page_verify_free_list(
        queue_head_t    *vm_page_queue,
@@ -3442,6 +3863,9 @@ vm_page_verify_free_list(
        vm_page_t       prev_m;
        boolean_t       found_page;
 
+       if (! vm_page_verify_this_free_list_enabled)
+               return 0;
+
        found_page = FALSE;
        npages = 0;
        prev_m = (vm_page_t) vm_page_queue;
@@ -3449,21 +3873,24 @@ vm_page_verify_free_list(
                      m,
                      vm_page_t,
                      pageq) {
+
                if (m == look_for_page) {
                        found_page = TRUE;
                }
                if ((vm_page_t) m->pageq.prev != prev_m)
                        panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
                              color, npages, m, m->pageq.prev, prev_m);
-               if ( ! m->free )
-                       panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n",
-                             color, npages, m);
                if ( ! m->busy )
                        panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
                              color, npages, m);
-               if ( color != (unsigned int) -1 && (m->phys_page & vm_color_mask) != color)
-                       panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
-                             color, npages, m, m->phys_page & vm_color_mask, color);
+               if (color != (unsigned int) -1) {
+                       if ((m->phys_page & vm_color_mask) != color)
+                               panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
+                                     color, npages, m, m->phys_page & vm_color_mask, color);
+                       if ( ! m->free )
+                               panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n",
+                                     color, npages, m);
+               }
                ++npages;
                prev_m = m;
        }
@@ -3480,13 +3907,12 @@ vm_page_verify_free_list(
                                if (other_color == color)
                                        continue;
                                vm_page_verify_free_list(&vm_page_queue_free[other_color],
-                                                       other_color, look_for_page, FALSE);
+                                                        other_color, look_for_page, FALSE);
                        }
-                       if (color != (unsigned int) -1) {
+                       if (color == (unsigned int) -1) {
                                vm_page_verify_free_list(&vm_lopage_queue_free,
                                                         (unsigned int) -1, look_for_page, FALSE);
                        }
-
                        panic("vm_page_verify_free_list(color=%u)\n", color);
                }
                if (!expect_page && found_page) {
@@ -3497,24 +3923,37 @@ vm_page_verify_free_list(
        return npages;
 }
 
-static boolean_t vm_page_verify_free_lists_enabled = FALSE;
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
 static void
 vm_page_verify_free_lists( void )
 {
        unsigned int    color, npages, nlopages;
+       boolean_t       toggle = TRUE;
 
-       if (! vm_page_verify_free_lists_enabled)
+       if (! vm_page_verify_all_free_lists_enabled)
                return;
 
        npages = 0;
 
        lck_mtx_lock(&vm_page_queue_free_lock);
+       
+       if (vm_page_verify_this_free_list_enabled == TRUE) {
+               /*
+                * This variable has been set globally for extra checking of
+                * each free list Q. Since we didn't set it, we don't own it
+                * and we shouldn't toggle it.
+                */
+               toggle = FALSE;
+       }
+
+       if (toggle == TRUE) {
+               vm_page_verify_this_free_list_enabled = TRUE;
+       }
 
        for( color = 0; color < vm_colors; color++ ) {
                npages += vm_page_verify_free_list(&vm_page_queue_free[color],
-                                               color, VM_PAGE_NULL, FALSE);
+                                                  color, VM_PAGE_NULL, FALSE);
        }
-
        nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
                                            (unsigned int) -1,
                                            VM_PAGE_NULL, FALSE);
@@ -3522,6 +3961,11 @@ vm_page_verify_free_lists( void )
                panic("vm_page_verify_free_lists:  "
                      "npages %u free_count %d nlopages %u lo_free_count %u",
                      npages, vm_page_free_count, nlopages, vm_lopage_free_count);
+
+       if (toggle == TRUE) {
+               vm_page_verify_this_free_list_enabled = FALSE;
+       }
+
        lck_mtx_unlock(&vm_page_queue_free_lock);
 }
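
The flag split above lets vm_page_verify_free_lists() switch on the more
expensive per-list checks for its own pass without clobbering a value that
was set globally (e.g. from a debugger). The ownership rule reduces to this
generic sketch; the wrapper is illustrative:

    static void
    run_with_extra_verification(void (*verify_pass)(void))
    {
            boolean_t       we_enabled_it = FALSE;

            if (vm_page_verify_this_free_list_enabled == FALSE) {
                    vm_page_verify_this_free_list_enabled = TRUE;
                    we_enabled_it = TRUE;   /* we set it, so we may clear it */
            }
            verify_pass();
            if (we_enabled_it)
                    vm_page_verify_this_free_list_enabled = FALSE;
    }
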
 
@@ -3530,6 +3974,9 @@ vm_page_queues_assert(
        vm_page_t       mem,
        int             val)
 {
+#if DEBUG
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
        if (mem->free + mem->active + mem->inactive + mem->speculative +
            mem->throttled + mem->pageout_queue > (val)) {
                _vm_page_print(mem);
@@ -3540,6 +3987,7 @@ vm_page_queues_assert(
                assert(!mem->inactive);
                assert(!mem->speculative);
                assert(!mem->throttled);
+               assert(!mem->pageout_queue);
        }
 }
 #endif /* MACH_ASSERT */
@@ -3636,6 +4084,7 @@ vm_page_find_contiguous(
        int             yielded = 0;
        int             dumped_run = 0;
        int             stolen_pages = 0;
+       int             compressed_pages = 0;
 #endif
 
        if (contig_pages == 0)
@@ -3647,6 +4096,8 @@ vm_page_find_contiguous(
 #if DEBUG
        clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
 #endif
+       PAGE_REPLACEMENT_ALLOWED(TRUE);
+
        vm_page_lock_queues();
        lck_mtx_lock(&vm_page_queue_free_lock);
 
@@ -3690,27 +4141,16 @@ retry:
                        /* no more low pages... */
                        break;
                }
-               if ( !(flags & KMA_LOMEM) && m->phys_page <= vm_lopage_poolend &&
-                   m->phys_page >= vm_lopage_poolstart) {
-                       /*
-                        * don't want to take pages from our
-                        * reserved pool of low memory
-                        * so don't consider it which
-                        * means starting a new run
-                        */
-                       RESET_STATE_OF_RUN();
-
-               } else if (!npages && ((m->phys_page & pnum_mask) != 0)) {
+               if (!npages && ((m->phys_page & pnum_mask) != 0)) {
                        /*
                         * not aligned
                         */
                        RESET_STATE_OF_RUN();
 
                } else if (VM_PAGE_WIRED(m) || m->gobbled ||
-                          m->encrypted || m->encrypted_cleaning || m->cs_validated || m->cs_tainted ||
-                          m->error || m->absent || m->pageout_queue || m->laundry || m->wanted || m->precious ||
-                          m->cleaning || m->overwriting || m->restart || m->unusual || m->list_req_pending ||
-                          m->pageout) {
+                          m->encrypted_cleaning ||
+                          m->pageout_queue || m->laundry || m->wanted ||
+                          m->cleaning || m->overwriting || m->pageout) {
                        /*
                         * page is in a transient state
                         * or a state we don't want to deal
@@ -3719,9 +4159,10 @@ retry:
                         */
                        RESET_STATE_OF_RUN();
 
-               } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled) {
+               } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled && !m->compressor) {
                        /*
                         * page needs to be on one of our queues
+                        * or it needs to belong to the compressor pool
                         * in order for it to be stable behind the
                         * locks we hold at this point...
                         * if not, don't consider it which
@@ -3772,7 +4213,7 @@ retry:
                                 * into a substitute page.
                                 */
 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
-                               if (m->pmapped || m->dirty) {
+                               if (m->pmapped || m->dirty || m->precious) {
                                        substitute_needed++;
                                }
 #else
@@ -3807,12 +4248,16 @@ retry:
                }
 did_consider:
                if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
-                       
+
+                       PAGE_REPLACEMENT_ALLOWED(FALSE);
+
                        lck_mtx_unlock(&vm_page_queue_free_lock);
                        vm_page_unlock_queues();
 
                        mutex_pause(0);
 
+                       PAGE_REPLACEMENT_ALLOWED(TRUE);
+
                        vm_page_lock_queues();
                        lck_mtx_lock(&vm_page_queue_free_lock);
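
The alignment test earlier in this loop, (m->phys_page & pnum_mask) != 0, assumes the requested alignment is a power of two, so that pnum_mask (the alignment in pages, minus one) isolates exactly the low bits that must be zero in the first page of a run. A standalone check of the arithmetic, with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* request: runs aligned to 16 pages (a power of two) */
        uint32_t align_pages = 16;
        uint32_t pnum_mask   = align_pages - 1;         /* 0x0F */

        /* a candidate may start a run only if its low bits are clear */
        uint32_t phys_page = 0x12340;                   /* multiple of 16 */
        assert((phys_page & pnum_mask) == 0);           /* aligned: run can start */

        phys_page = 0x12345;
        assert((phys_page & pnum_mask) != 0);           /* misaligned: reset the run */
        return 0;
}
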
 
@@ -3880,46 +4325,21 @@ did_consider:
 #endif
 
                        if (m1->free) {
-                               if (  m1->phys_page <= vm_lopage_poolend &&
-                                           m1->phys_page >= vm_lopage_poolstart) {
+                               unsigned int color;
 
-                                       assert( flags & KMA_LOMEM );
+                               color = m1->phys_page & vm_color_mask;
 #if MACH_ASSERT
-                                       vm_page_verify_free_list(&vm_lopage_queue_free,
-                                                               (unsigned int) -1, m1, TRUE);
+                               vm_page_verify_free_list(&vm_page_queue_free[color], color, m1, TRUE);
 #endif
-                                       queue_remove(&vm_lopage_queue_free,
-                                                    m1,
-                                                    vm_page_t,
-                                                    pageq);
-                                       vm_lopage_free_count--;
-
-#if MACH_ASSERT
-                                       vm_page_verify_free_list(&vm_lopage_queue_free,
-                                                               (unsigned int) -1, VM_PAGE_NULL, FALSE);
-#endif
-                               } else {
-
-                                       unsigned int color;
-
-                                       color = m1->phys_page & vm_color_mask;
-#if MACH_ASSERT
-                                       vm_page_verify_free_list(&vm_page_queue_free[color],
-                                                               color, m1, TRUE);
-#endif
-                                       queue_remove(&vm_page_queue_free[color],
-                                                    m1,
-                                                    vm_page_t,
-                                                    pageq);
-                                       vm_page_free_count--;
-#if MACH_ASSERT
-                                       vm_page_verify_free_list(&vm_page_queue_free[color],
-                                                               color, VM_PAGE_NULL, FALSE);
-#endif
-                               }
-
+                               queue_remove(&vm_page_queue_free[color],
+                                            m1,
+                                            vm_page_t,
+                                            pageq);
                                m1->pageq.next = NULL;
                                m1->pageq.prev = NULL;
+#if MACH_ASSERT
+                               vm_page_verify_free_list(&vm_page_queue_free[color], color, VM_PAGE_NULL, FALSE);
+#endif
                                /*
                                 * Clear the "free" bit so that this page
                                 * does not get considered for another
@@ -3927,14 +4347,10 @@ did_consider:
                                 */
                                m1->free = FALSE; 
                                assert(m1->busy);
+
+                               vm_page_free_count--;
                        }
                }
-               /*
-                * adjust global freelist counts
-                */
-               if (vm_page_free_count < vm_page_free_count_minimum)
-                       vm_page_free_count_minimum = vm_page_free_count;
-
                if( flags & KMA_LOMEM)
                        vm_page_lomem_find_contiguous_last_idx = page_idx;
                else 
@@ -3960,6 +4376,7 @@ did_consider:
                        m1 = &vm_pages[cur_idx--];
 
                        assert(!m1->free);
+
                        if (m1->object == VM_OBJECT_NULL) {
                                /*
                                 * page has already been removed from
@@ -3971,6 +4388,8 @@ did_consider:
                                assert(!m1->laundry);
                        } else {
                                vm_object_t object;
+                               int refmod;
+                               boolean_t disconnected, reusable;
 
                                if (abort_run == TRUE)
                                        continue;
@@ -3987,9 +4406,9 @@ did_consider:
                                }
                                if (locked_object == VM_OBJECT_NULL || 
                                    (VM_PAGE_WIRED(m1) || m1->gobbled ||
-                                    m1->encrypted || m1->encrypted_cleaning || m1->cs_validated || m1->cs_tainted ||
-                                    m1->error || m1->absent || m1->pageout_queue || m1->laundry || m1->wanted || m1->precious ||
-                                    m1->cleaning || m1->overwriting || m1->restart || m1->unusual || m1->list_req_pending || m1->busy)) {
+                                    m1->encrypted_cleaning ||
+                                    m1->pageout_queue || m1->laundry || m1->wanted ||
+                                    m1->cleaning || m1->overwriting || m1->pageout || m1->busy)) {
 
                                        if (locked_object) {
                                                vm_object_unlock(locked_object);
@@ -3999,8 +4418,31 @@ did_consider:
                                        abort_run = TRUE;
                                        continue;
                                }
-                               if (m1->pmapped || m1->dirty) {
-                                       int refmod;
+
+                               disconnected = FALSE;
+                               reusable = FALSE;
+
+                               if ((m1->reusable ||
+                                    m1->object->all_reusable) &&
+                                   m1->inactive &&
+                                   !m1->dirty &&
+                                   !m1->reference) {
+                                       /* reusable page... */
+                                       refmod = pmap_disconnect(m1->phys_page);
+                                       disconnected = TRUE;
+                                       if (refmod == 0) {
+                                               /*
+                                                * ... not reused: can steal
+                                                * without relocating contents.
+                                                */
+                                               reusable = TRUE;
+                                       }
+                               }
+
+                               if ((m1->pmapped &&
+                                    ! reusable) ||
+                                   m1->dirty ||
+                                   m1->precious) {
                                        vm_object_offset_t offset;
 
                                        m2 = vm_page_grab();
@@ -4014,19 +4456,80 @@ did_consider:
                                                abort_run = TRUE;
                                                continue;
                                        }
-                                       if (m1->pmapped)
-                                               refmod = pmap_disconnect(m1->phys_page);
-                                       else
-                                               refmod = 0;
-                                       vm_page_copy(m1, m2);
-                 
-                                       m2->reference = m1->reference;
-                                       m2->dirty     = m1->dirty;
+                                       if (! disconnected) {
+                                               if (m1->pmapped)
+                                                       refmod = pmap_disconnect(m1->phys_page);
+                                               else
+                                                       refmod = 0;
+                                       }
+
+                                       /* copy the page's contents */
+                                       pmap_copy_page(m1->phys_page, m2->phys_page);
+                                       /* copy the page's state */
+                                       assert(!VM_PAGE_WIRED(m1));
+                                       assert(!m1->free);
+                                       assert(!m1->pageout_queue);
+                                       assert(!m1->laundry);
+                                       m2->reference   = m1->reference;
+                                       assert(!m1->gobbled);
+                                       assert(!m1->private);
+                                       m2->no_cache    = m1->no_cache;
+                                       m2->xpmapped    = 0;
+                                       assert(!m1->busy);
+                                       assert(!m1->wanted);
+                                       assert(!m1->fictitious);
+                                       m2->pmapped     = m1->pmapped; /* should flush cache ? */
+                                       m2->wpmapped    = m1->wpmapped;
+                                       assert(!m1->pageout);
+                                       m2->absent      = m1->absent;
+                                       m2->error       = m1->error;
+                                       m2->dirty       = m1->dirty;
+                                       assert(!m1->cleaning);
+                                       m2->precious    = m1->precious;
+                                       m2->clustered   = m1->clustered;
+                                       assert(!m1->overwriting);
+                                       m2->restart     = m1->restart;
+                                       m2->unusual     = m1->unusual;
+                                       m2->encrypted   = m1->encrypted;
+                                       assert(!m1->encrypted_cleaning);
+                                       m2->cs_validated = m1->cs_validated;
+                                       m2->cs_tainted  = m1->cs_tainted;
+
+                                       /*
+                                        * If m1 had really been reusable,
+                                        * we would have just stolen it, so
+                                        * let's not propagate its "reusable"
+                                        * bit and assert that m2 is not
+                                        * marked as "reusable".
+                                        */
+                                       // m2->reusable = m1->reusable;
+                                       assert(!m2->reusable);
+
+                                       assert(!m1->lopage);
+                                       m2->slid        = m1->slid;
+                                       m2->compressor  = m1->compressor;
+
+                                       /*
+                                        * page may need to be flushed if
+                                        * it is marshalled into a UPL
+                                        * that is going to be used by a device
+                                        * that doesn't support coherency
+                                        */
+                                       m2->written_by_kernel = TRUE;
+
+                                       /*
+                                        * make sure we clear the ref/mod state
+                                        * from the pmap layer... else we risk
+                                        * inheriting state from the last time
+                                        * this page was used...
+                                        */
+                                       pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
 
                                        if (refmod & VM_MEM_REFERENCED)
                                                m2->reference = TRUE;
-                                       if (refmod & VM_MEM_MODIFIED)
-                                               m2->dirty = TRUE;
+                                       if (refmod & VM_MEM_MODIFIED) {
+                                               SET_PAGE_DIRTY(m2, TRUE);
+                                       }
                                        offset = m1->offset;
 
                                        /*
@@ -4039,25 +4542,31 @@ did_consider:
                                        vm_page_free_prepare(m1);
 
                                        /*
-                                        * make sure we clear the ref/mod state
-                                        * from the pmap layer... else we risk
-                                        * inheriting state from the last time
-                                        * this page was used...
-                                        */
-                                       pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
-                                       /*
-                                        * now put the substitute page on the object
+                                        * now put the substitute page
+                                        * on the object
                                         */
-                                       vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE);
+                                       vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE, FALSE);
 
-                                       if (m2->reference)
-                                               vm_page_activate(m2);
-                                       else
-                                               vm_page_deactivate(m2);
+                                       if (m2->compressor) {
+                                               m2->pmapped = TRUE;
+                                               m2->wpmapped = TRUE;
 
+                                               PMAP_ENTER(kernel_pmap, m2->offset, m2,
+                                                          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
+#if MACH_ASSERT
+                                               compressed_pages++;
+#endif
+                                       } else {
+                                               if (m2->reference)
+                                                       vm_page_activate(m2);
+                                               else
+                                                       vm_page_deactivate(m2);
+                                       }
                                        PAGE_WAKEUP_DONE(m2);
 
                                } else {
+                                       assert(!m1->compressor);
+
                                        /*
                                         * completely cleans up the state
                                         * of the page so that it is ready
@@ -4144,6 +4653,8 @@ did_consider:
                assert(vm_page_verify_contiguous(m, npages));
        }
 done_scanning:
+       PAGE_REPLACEMENT_ALLOWED(FALSE);
+
        vm_page_unlock_queues();
 
 #if DEBUG
@@ -4160,10 +4671,10 @@ done_scanning:
                tv_end_sec -= 1000000;
        }
        if (vm_page_find_contig_debug) {
-               printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d... scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages\n",
-              __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
-              (long)tv_end_sec, tv_end_usec, orig_last_idx,
-              scanned, yielded, dumped_run, stolen_pages);
+               printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
+                      __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+                      (long)tv_end_sec, tv_end_usec, orig_last_idx,
+                      scanned, yielded, dumped_run, stolen_pages, compressed_pages);
        }
 
 #endif
@@ -4188,7 +4699,7 @@ cpm_allocate(
        vm_page_t               pages;
        unsigned int            npages;
 
-       if (size % page_size != 0)
+       if (size % PAGE_SIZE != 0)
                return KERN_INVALID_ARGUMENT;
 
        npages = (unsigned int) (size / PAGE_SIZE);
@@ -4210,28 +4721,12 @@ cpm_allocate(
         * determine need for wakeups
         */
        if ((vm_page_free_count < vm_page_free_min) ||
-           ((vm_page_free_count < vm_page_free_target) &&
-            ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
-               thread_wakeup((event_t) &vm_page_free_wanted);
+            ((vm_page_free_count < vm_page_free_target) &&
+             ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+                thread_wakeup((event_t) &vm_page_free_wanted);
                
-#if CONFIG_EMBEDDED
-       {
-       int                     percent_avail;
-
-       /*
-        * Decide if we need to poke the memorystatus notification thread.
-        */
-       percent_avail = 
-               (vm_page_active_count + vm_page_inactive_count + 
-                vm_page_speculative_count + vm_page_free_count +
-                (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count)  ) * 100 /
-               atop_64(max_mem);
-       if (percent_avail <= (kern_memorystatus_level - 5)) {
-               kern_memorystatus_level = percent_avail;
-               thread_wakeup((event_t)&kern_memorystatus_wakeup);
-       }
-       }
-#endif
+       VM_CHECK_MEMORYSTATUS;
+       
        /*
         *      The CPM pages should now be available and
         *      ordered by ascending physical address.
@@ -4241,186 +4736,944 @@ cpm_allocate(
        *list = pages;
        return KERN_SUCCESS;
 }
-       
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-#if HIBERNATION
 
-static vm_page_t hibernate_gobble_queue;
+unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
 
-static void
-hibernate_page_list_zero(hibernate_page_list_t *list)
+/*
+ * when working on a 'run' of pages, it is necessary to hold 
+ * the vm_page_queue_lock (a hot global lock) for certain operations
+ * on the page... however, the majority of the work can be done
+ * while merely holding the object lock... in fact there are certain
+ * collections of pages that don't require any work brokered by the
+ * vm_page_queue_lock... to mitigate the time spent behind the global
+ * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
+ * while doing all of the work that doesn't require the vm_page_queue_lock...
+ * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
+ * necessary work for each page... we will grab the busy bit on the page
+ * if it's not already held so that vm_page_do_delayed_work can drop the object lock
+ * if it can't immediately take the vm_page_queue_lock in order to compete
+ * for the locks in the same order that vm_pageout_scan takes them.
+ * the operation names are modeled after the names of the routines that
+ * need to be called in order to make the changes very obvious in the
+ * original loop
+ */
+
+void
+vm_page_do_delayed_work(
+       vm_object_t     object,
+       struct vm_page_delayed_work *dwp,
+       int             dw_count)
 {
-    uint32_t             bank;
-    hibernate_bitmap_t * bitmap;
+       int             j;
+       vm_page_t       m;
+        vm_page_t       local_free_q = VM_PAGE_NULL;
 
-    bitmap = &list->bank_bitmap[0];
-    for (bank = 0; bank < list->bank_count; bank++)
-    {
-        uint32_t last_bit;
+       /*
+        * pageout_scan takes the vm_page_lock_queues first
+        * then tries for the object lock... to avoid what
+        * is effectively a lock inversion, we'll go to the
+        * trouble of taking them in that same order... otherwise
+        * if this object contains the majority of the pages resident
+        * in the UBC (or a small set of large objects actively being
+        * worked on contain the majority of the pages), we could
+        * cause the pageout_scan thread to 'starve' in its attempt
+        * to find pages to move to the free queue, since it has to
+        * successfully acquire the object lock of any candidate page
+        * before it can steal/clean it.
+        */
+       if (!vm_page_trylockspin_queues()) {
+               vm_object_unlock(object);
 
-       bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); 
-        // set out-of-bound bits at end of bitmap.
-        last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
-       if (last_bit)
-           bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
+               vm_page_lockspin_queues();
+
+               for (j = 0; ; j++) {
+                       if (!vm_object_lock_avoid(object) &&
+                           _vm_object_lock_try(object))
+                               break;
+                       vm_page_unlock_queues();
+                       mutex_pause(j);
+                       vm_page_lockspin_queues();
+               }
+       }
+       for (j = 0; j < dw_count; j++, dwp++) {
+
+               m = dwp->dw_m;
+
+               if (dwp->dw_mask & DW_vm_pageout_throttle_up)
+                       vm_pageout_throttle_up(m);
+#if CONFIG_PHANTOM_CACHE
+               if (dwp->dw_mask & DW_vm_phantom_cache_update)
+                       vm_phantom_cache_update(m);
+#endif
+               if (dwp->dw_mask & DW_vm_page_wire)
+                       vm_page_wire(m);
+               else if (dwp->dw_mask & DW_vm_page_unwire) {
+                       boolean_t       queueit;
+
+                       queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
+
+                       vm_page_unwire(m, queueit);
+               }
+               if (dwp->dw_mask & DW_vm_page_free) {
+                       vm_page_free_prepare_queues(m);
+
+                       assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+                       /*
+                        * Add this page to our list of reclaimed pages,
+                        * to be freed later.
+                        */
+                       m->pageq.next = (queue_entry_t) local_free_q;
+                       local_free_q = m;
+               } else {
+                       if (dwp->dw_mask & DW_vm_page_deactivate_internal)
+                               vm_page_deactivate_internal(m, FALSE);
+                       else if (dwp->dw_mask & DW_vm_page_activate) {
+                               if (m->active == FALSE) {
+                                       vm_page_activate(m);
+                               }
+                       }
+                       else if (dwp->dw_mask & DW_vm_page_speculate)
+                               vm_page_speculate(m, TRUE);
+                       else if (dwp->dw_mask & DW_enqueue_cleaned) {
+                               /*
+                                * if we didn't hold the object lock and did this,
+                                * we might disconnect the page, then someone might
+                                * soft fault it back in, then we would put it on the
+                                * cleaned queue, and so we would have a referenced (maybe even dirty)
+                                * page on that queue, which we don't want
+                                */
+                               int refmod_state = pmap_disconnect(m->phys_page);
+
+                               if ((refmod_state & VM_MEM_REFERENCED)) {
+                                       /*
+                                        * this page has been touched since it got cleaned; let's activate it
+                                        * if it hasn't already been
+                                        */
+                                       vm_pageout_enqueued_cleaned++;
+                                       vm_pageout_cleaned_reactivated++;
+                                       vm_pageout_cleaned_commit_reactivated++;
+
+                                       if (m->active == FALSE)
+                                               vm_page_activate(m);
+                               } else {
+                                       m->reference = FALSE;
+                                       vm_page_enqueue_cleaned(m);
+                               }
+                       }
+                       else if (dwp->dw_mask & DW_vm_page_lru)
+                               vm_page_lru(m);
+                       else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
+                               if ( !m->pageout_queue)
+                                       VM_PAGE_QUEUES_REMOVE(m);
+                       }
+                       if (dwp->dw_mask & DW_set_reference)
+                               m->reference = TRUE;
+                       else if (dwp->dw_mask & DW_clear_reference)
+                               m->reference = FALSE;
+
+                       if (dwp->dw_mask & DW_move_page) {
+                               if ( !m->pageout_queue) {
+                                       VM_PAGE_QUEUES_REMOVE(m);
+
+                                       assert(m->object != kernel_object);
+
+                                       VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+                               }
+                       }
+                       if (dwp->dw_mask & DW_clear_busy)
+                               m->busy = FALSE;
+
+                       if (dwp->dw_mask & DW_PAGE_WAKEUP)
+                               PAGE_WAKEUP(m);
+               }
+       }
+       vm_page_unlock_queues();
+
+       if (local_free_q)
+               vm_page_free_list(local_free_q, TRUE);
+       
+       VM_CHECK_MEMORYSTATUS;
 
-       bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
-    }
 }
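
A sketch of the caller side of the two-pass scheme described above: pass 1 records a DW_* mask per page while holding only the object lock, and vm_page_do_delayed_work() is invoked once per batch to take the hot queue lock. The page array, the chosen masks, and the helper name are assumptions of the sketch, not kernel interfaces:

/*
 * illustrative caller of vm_page_do_delayed_work(): the pages to visit
 * arrive as an array here purely to keep the sketch self-contained
 */
static void
process_pages_batched(vm_object_t object, vm_page_t *pages, int npages)
{
        struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
        struct vm_page_delayed_work     *dwp = &dw_array[0];
        int                             dw_count = 0;
        int                             dw_limit = DEFAULT_DELAYED_WORK_LIMIT;
        int                             i;

        vm_object_lock(object);

        for (i = 0; i < npages; i++) {
                vm_page_t m = pages[i];

                /* pass 1: work that needs only the object lock */
                m->busy = TRUE;                 /* pin the page for pass 2 */

                dwp->dw_m = m;
                dwp->dw_mask = DW_clear_busy | DW_PAGE_WAKEUP;
                dwp++;

                if (++dw_count >= dw_limit) {
                        /* pass 2: one queue-lock acquisition per batch */
                        vm_page_do_delayed_work(object, &dw_array[0], dw_count);
                        dwp = &dw_array[0];
                        dw_count = 0;
                }
        }
        if (dw_count)
                vm_page_do_delayed_work(object, &dw_array[0], dw_count);

        vm_object_unlock(object);
}
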
 
-void
-hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
+kern_return_t
+vm_page_alloc_list(
+       int     page_count,
+       int     flags,
+       vm_page_t *list)
 {
-    uint32_t i;
-    vm_page_t m;
-    uint64_t start, end, timeout, nsec;
-    clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
-    clock_get_uptime(&start);
+       vm_page_t       lo_page_list = VM_PAGE_NULL;
+       vm_page_t       mem;
+       int             i;
 
-    for (i = 0; i < gobble_count; i++)
-    {
-       while (VM_PAGE_NULL == (m = vm_page_grab()))
-       {
-           clock_get_uptime(&end);
-           if (end >= timeout)
-               break;
-           VM_PAGE_WAIT();
-       }
-       if (!m)
-           break;
-       m->busy = FALSE;
-       vm_page_gobble(m);
+       if ( !(flags & KMA_LOMEM))
+               panic("vm_page_alloc_list: called w/o KMA_LOMEM");
 
-       m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
-       hibernate_gobble_queue = m;
-    }
+       for (i = 0; i < page_count; i++) {
 
-    clock_get_uptime(&end);
-    absolutetime_to_nanoseconds(end - start, &nsec);
-    HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
+               mem = vm_page_grablo();
+
+               if (mem == VM_PAGE_NULL) {
+                       if (lo_page_list)
+                               vm_page_free_list(lo_page_list, FALSE);
+
+                       *list = VM_PAGE_NULL;
+
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
+               mem->pageq.next = (queue_entry_t) lo_page_list;
+               lo_page_list = mem;
+       }
+       *list = lo_page_list;
+
+       return (KERN_SUCCESS);
 }
 
 void
-hibernate_free_gobble_pages(void)
+vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
 {
-    vm_page_t m, next;
-    uint32_t  count = 0;
+       page->offset = offset;
+}
 
-    m = (vm_page_t) hibernate_gobble_queue;
-    while(m)
-    {
-        next = (vm_page_t) m->pageq.next;
-        vm_page_free(m);
-        count++;
-        m = next;
-    }
-    hibernate_gobble_queue = VM_PAGE_NULL;
-    
-    if (count)
-        HIBLOG("Freed %d pages\n", count);
+vm_page_t
+vm_page_get_next(vm_page_t page)
+{
+       return ((vm_page_t) page->pageq.next);
 }
 
-static boolean_t 
-hibernate_consider_discard(vm_page_t m)
+vm_object_offset_t
+vm_page_get_offset(vm_page_t page)
 {
-    vm_object_t object = NULL;
-    int                  refmod_state;
-    boolean_t            discard = FALSE;
+       return (page->offset);
+}
 
-    do
-    {
-        if(m->private)
-            panic("hibernate_consider_discard: private");
+ppnum_t
+vm_page_get_phys_page(vm_page_t page)
+{
+       return (page->phys_page);
+}
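
These accessors let code outside the VM treat the page chain built by vm_page_alloc_list() as opaque. A hedged sketch of a consumer (the device hand-off and the helper name are illustrative):

static kern_return_t
grab_low_pages(int n)
{
        vm_page_t       list, p;
        kern_return_t   kr;

        kr = vm_page_alloc_list(n, KMA_LOMEM, &list);   /* KMA_LOMEM is mandatory */
        if (kr != KERN_SUCCESS)
                return (kr);

        for (p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
                ppnum_t pn = vm_page_get_phys_page(p);
                /* ... program a device that needs low physical pages ... */
                (void) pn;
        }
        vm_page_free_list(list, FALSE);         /* give the chain back */

        return (KERN_SUCCESS);
}
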
+       
+       
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-        if (!vm_object_lock_try(m->object))
-            break;
+#if HIBERNATION
 
-        object = m->object;
+static vm_page_t hibernate_gobble_queue;
 
-       if (VM_PAGE_WIRED(m))
-            break;
-        if (m->precious)
-            break;
+extern boolean_t (* volatile consider_buffer_cache_collect)(int);
+
+static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
+static int  hibernate_flush_dirty_pages(int);
+static int  hibernate_flush_queue(queue_head_t *, int);
+
+void hibernate_flush_wait(void);
+void hibernate_mark_in_progress(void);
+void hibernate_clear_in_progress(void);
+
+void           hibernate_free_range(int, int);
+void           hibernate_hash_insert_page(vm_page_t);
+uint32_t       hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
+void           hibernate_rebuild_vm_structs(void);
+uint32_t       hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
+ppnum_t                hibernate_lookup_paddr(unsigned int);
+
+struct hibernate_statistics {
+       int hibernate_considered;
+       int hibernate_reentered_on_q;
+       int hibernate_found_dirty;
+       int hibernate_skipped_cleaning;
+       int hibernate_skipped_transient;
+       int hibernate_skipped_precious;
+       int hibernate_skipped_external;
+       int hibernate_queue_nolock;
+       int hibernate_queue_paused;
+       int hibernate_throttled;
+       int hibernate_throttle_timeout;
+       int hibernate_drained;
+       int hibernate_drain_timeout;
+       int cd_lock_failed;
+       int cd_found_precious;
+       int cd_found_wired;
+       int cd_found_busy;
+       int cd_found_unusual;
+       int cd_found_cleaning;
+       int cd_found_laundry;
+       int cd_found_dirty;
+       int cd_found_xpmapped;
+       int cd_skipped_xpmapped;
+       int cd_local_free;
+       int cd_total_free;
+       int cd_vm_page_wire_count;
+       int cd_vm_struct_pages_unneeded;
+       int cd_pages;
+       int cd_discarded;
+       int cd_count_wire;
+} hibernate_stats;
 
-        if (m->busy || !object->alive)
-           /*
-            *  Somebody is playing with this page.
-            */
-            break;
 
-        if (m->absent || m->unusual || m->error)
-           /*
-            * If it's unusual in anyway, ignore it
-            */
-            break;
-    
-        if (m->cleaning)
-            break;
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define        HIBERNATE_XPMAPPED_LIMIT        40000
 
-       if (m->laundry || m->list_req_pending)
-            break;
 
-        if (!m->dirty)
-        {
-            refmod_state = pmap_get_refmod(m->phys_page);
-        
-            if (refmod_state & VM_MEM_REFERENCED)
-                m->reference = TRUE;
-            if (refmod_state & VM_MEM_MODIFIED)
-                m->dirty = TRUE;
-        }
-   
-        /*
-         * If it's clean or purgeable we can discard the page on wakeup.
-         */
-        discard = (!m->dirty) 
-                   || (VM_PURGABLE_VOLATILE == object->purgable)
-                   || (VM_PURGABLE_EMPTY    == m->object->purgable);
-    }
-    while (FALSE);
+static int
+hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
+{
+       wait_result_t   wait_result;
 
-    if (object)
-        vm_object_unlock(object);
+       vm_page_lock_queues();
 
-    return (discard);
-}
+       while ( !queue_empty(&q->pgo_pending) ) {
 
+               q->pgo_draining = TRUE;
 
-static void
-hibernate_discard_page(vm_page_t m)
-{
-    if (m->absent || m->unusual || m->error)
-       /*
-        * If it's unusual in anyway, ignore
-        */
-        return;
+               assert_wait_timeout((event_t) (&q->pgo_laundry+1), THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC);
 
-    if (m->pmapped == TRUE) 
-    {
-        __unused int refmod_state = pmap_disconnect(m->phys_page);
-    }
+               vm_page_unlock_queues();
 
-    if (m->laundry)
-        panic("hibernate_discard_page(%p) laundry", m);
-    if (m->private)
-        panic("hibernate_discard_page(%p) private", m);
-    if (m->fictitious)
-        panic("hibernate_discard_page(%p) fictitious", m);
+               wait_result = thread_block(THREAD_CONTINUE_NULL);
 
-    if (VM_PURGABLE_VOLATILE == m->object->purgable)
-    {
-       /* object should be on a queue */
-        assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL));
-        purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
+               if (wait_result == THREAD_TIMED_OUT && !queue_empty(&q->pgo_pending)) {
+                       hibernate_stats.hibernate_drain_timeout++;
+                       
+                       if (q == &vm_pageout_queue_external)
+                               return (0);
+                       
+                       return (1);
+               }
+               vm_page_lock_queues();
+
+               hibernate_stats.hibernate_drained++;
+       }
+       vm_page_unlock_queues();
+
+       return (0);
+}
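
hibernate_drain_pageout_queue() is a bounded wait: mark the queue draining, sleep with a timeout on the laundry event, and treat a timeout with work still pending as failure. The same shape in portable form, with a condition variable standing in for assert_wait_timeout()/thread_block():

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cv   = PTHREAD_COND_INITIALIZER;
static int             q_pending;       /* work items still queued */

/* returns 0 when drained, 1 on timeout with work remaining */
static int
drain_with_timeout(unsigned timeout_sec)
{
        struct timespec deadline;

        pthread_mutex_lock(&q_lock);
        while (q_pending > 0) {
                clock_gettime(CLOCK_REALTIME, &deadline);
                deadline.tv_sec += timeout_sec;

                int rc = pthread_cond_timedwait(&q_cv, &q_lock, &deadline);
                if (rc == ETIMEDOUT && q_pending > 0) {
                        pthread_mutex_unlock(&q_lock);
                        return 1;       /* like hibernate_drain_timeout */
                }
        }
        pthread_mutex_unlock(&q_lock);
        return 0;
}
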
+
+
+boolean_t hibernate_skip_external = FALSE;
+
+static int
+hibernate_flush_queue(queue_head_t *q, int qcount)
+{
+       vm_page_t       m;
+       vm_object_t     l_object = NULL;
+       vm_object_t     m_object = NULL;
+       int             refmod_state = 0;
+       int             try_failed_count = 0;
+       int             retval = 0;
+       int             current_run = 0;
+       struct  vm_pageout_queue *iq;
+       struct  vm_pageout_queue *eq;
+       struct  vm_pageout_queue *tq;
+
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, q, qcount, 0, 0, 0);
+       
+       iq = &vm_pageout_queue_internal;
+       eq = &vm_pageout_queue_external;
+
+       vm_page_lock_queues();
+
+       while (qcount && !queue_empty(q)) {
+
+               if (current_run++ == 1000) {
+                       if (hibernate_should_abort()) {
+                               retval = 1;
+                               break;
+                       }
+                       current_run = 0;
+               }
+
+               m = (vm_page_t) queue_first(q);
+               m_object = m->object;
+
+               /*
+                * check to see if we currently are working
+                * with the same object... if so, we've
+                * already got the lock
+                */
+               if (m_object != l_object) {
+                       /*
+                        * the object associated with candidate page is 
+                        * different from the one we were just working
+                        * with... dump the lock if we still own it
+                        */
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
+                       }
+                       /*
+                        * Try to lock object; since we've already got the
+                        * page queues lock, we can only 'try' for this one.
+                        * if the 'try' fails, we need to do a mutex_pause
+                        * to allow the owner of the object lock a chance to
+                        * run... 
+                        */
+                       if ( !vm_object_lock_try_scan(m_object)) {
+
+                               if (try_failed_count > 20) {
+                                       hibernate_stats.hibernate_queue_nolock++;
+
+                                       goto reenter_pg_on_q;
+                               }
+
+                               vm_page_unlock_queues();
+                               mutex_pause(try_failed_count++);
+                               vm_page_lock_queues();
+
+                               hibernate_stats.hibernate_queue_paused++;
+                               continue;
+                       } else {
+                               l_object = m_object;
+                       }
+               }
+               if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
+                       /*
+                        * page is not to be cleaned
+                        * put it back on the head of its queue
+                        */
+                       if (m->cleaning)
+                               hibernate_stats.hibernate_skipped_cleaning++;
+                       else
+                               hibernate_stats.hibernate_skipped_transient++;
+
+                       goto reenter_pg_on_q;
+               }
+               if (m_object->copy == VM_OBJECT_NULL) {
+                       if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
+                               /*
+                                * let the normal hibernate image path
+                                * deal with these
+                                */
+                               goto reenter_pg_on_q;
+                       }
+               }
+               if ( !m->dirty && m->pmapped) {
+                       refmod_state = pmap_get_refmod(m->phys_page);
+
+                       if ((refmod_state & VM_MEM_MODIFIED)) {
+                               SET_PAGE_DIRTY(m, FALSE);
+                       }
+               } else
+                       refmod_state = 0;
+
+               if ( !m->dirty) {
+                       /*
+                        * page is not to be cleaned
+                        * put it back on the head of its queue
+                        */
+                       if (m->precious)
+                               hibernate_stats.hibernate_skipped_precious++;
+
+                       goto reenter_pg_on_q;
+               }
+
+               if (hibernate_skip_external == TRUE && !m_object->internal) {
+
+                       hibernate_stats.hibernate_skipped_external++;
+                       
+                       goto reenter_pg_on_q;
+               }
+               tq = NULL;
+
+               if (m_object->internal) {
+                       if (VM_PAGE_Q_THROTTLED(iq))
+                               tq = iq;
+               } else if (VM_PAGE_Q_THROTTLED(eq))
+                       tq = eq;
+
+               if (tq != NULL) {
+                       wait_result_t   wait_result;
+                       int             wait_count = 5;
+
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
+                       }
+
+                       while (retval == 0) {
+
+                               tq->pgo_throttled = TRUE;
+
+                               assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
+
+                               vm_page_unlock_queues();
+
+                               wait_result = thread_block(THREAD_CONTINUE_NULL);
+
+                               vm_page_lock_queues();
+
+                               if (wait_result != THREAD_TIMED_OUT)
+                                       break;
+                                if (!VM_PAGE_Q_THROTTLED(tq))
+                                        break;
+
+                               if (hibernate_should_abort())
+                                       retval = 1;
+
+                               if (--wait_count == 0) {
+
+                                       hibernate_stats.hibernate_throttle_timeout++;
+
+                                       if (tq == eq) {
+                                               hibernate_skip_external = TRUE;
+                                               break;
+                                       }
+                                       retval = 1;
+                               }
+                       }
+                       if (retval)
+                               break;
+
+                       hibernate_stats.hibernate_throttled++;
+
+                       continue;
+               }
+               /*
+                * we've already factored out pages in the laundry which
+                * means this page can't be on the pageout queue so it's
+                * safe to do the VM_PAGE_QUEUES_REMOVE
+                */
+                assert(!m->pageout_queue);
+
+               VM_PAGE_QUEUES_REMOVE(m);
+
+               if (COMPRESSED_PAGER_IS_ACTIVE && m_object->internal == TRUE)
+                       pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
+
+               vm_pageout_cluster(m, FALSE);
+
+               hibernate_stats.hibernate_found_dirty++;
+
+               goto next_pg;
+
+reenter_pg_on_q:
+               queue_remove(q, m, vm_page_t, pageq);
+               queue_enter(q, m, vm_page_t, pageq);
+
+               hibernate_stats.hibernate_reentered_on_q++;
+next_pg:
+               hibernate_stats.hibernate_considered++;
+
+               qcount--;
+               try_failed_count = 0;
+       }
+       if (l_object != NULL) {
+               vm_object_unlock(l_object);
+               l_object = NULL;
+       }
+
+       vm_page_unlock_queues();
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
+
+       return (retval);
+}
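
The object-lock dance in hibernate_flush_queue() is a bounded trylock with backoff: because the page-queue lock is already held, the code may only try for the object lock, and on failure it drops the hot lock, pauses, and retries, giving up after a fixed number of attempts. Reduced to a user-space helper (the names and the retry cap are illustrative):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

/* caller holds 'hot' on entry and on return, whether or not 'obj' was taken */
static bool
lock_with_backoff(pthread_mutex_t *hot, pthread_mutex_t *obj, int max_tries)
{
        for (int tries = 0; tries <= max_tries; tries++) {
                if (pthread_mutex_trylock(obj) == 0)
                        return true;            /* got the object lock */

                /* drop the hot lock so the owner can make progress */
                pthread_mutex_unlock(hot);
                sched_yield();                  /* stands in for mutex_pause() */
                pthread_mutex_lock(hot);
        }
        return false;                           /* skip this object */
}
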
+
+
+static int
+hibernate_flush_dirty_pages(int pass)
+{
+       struct vm_speculative_age_q     *aq;
+       uint32_t        i;
+
+       if (vm_page_local_q) {
+               for (i = 0; i < vm_page_local_q_count; i++)
+                       vm_page_reactivate_local(i, TRUE, FALSE);
+       }
+
+       for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
+               int             qcount;
+               vm_page_t       m;
+
+               aq = &vm_page_queue_speculative[i];
+
+               if (queue_empty(&aq->age_q))
+                       continue;
+               qcount = 0;
+
+               vm_page_lockspin_queues();
+
+               queue_iterate(&aq->age_q,
+                             m,
+                             vm_page_t,
+                             pageq)
+               {
+                       qcount++;
+               }
+               vm_page_unlock_queues();
+
+               if (qcount) {
+                       if (hibernate_flush_queue(&aq->age_q, qcount))
+                               return (1);
+               }
+       }
+       if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count))
+               return (1);
+       if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count))
+               return (1);
+       if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count))
+               return (1);
+       if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal))
+               return (1);
+
+       if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+               vm_compressor_record_warmup_start();
+
+       if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
+               if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+                       vm_compressor_record_warmup_end();
+               return (1);
+       }
+       if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
+               if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+                       vm_compressor_record_warmup_end();
+               return (1);
+       }
+       if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+               vm_compressor_record_warmup_end();
+
+       if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external))
+               return (1);
+
+       return (0);
+}
+
+
+void
+hibernate_reset_stats()
+{
+       bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
+
+int
+hibernate_flush_memory()
+{
+       int     retval;
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
+
+       hibernate_cleaning_in_progress = TRUE;
+       hibernate_skip_external = FALSE;
+
+       if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
+
+               if (COMPRESSED_PAGER_IS_ACTIVE) {
+
+                               KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+
+                               vm_compressor_flush();
+
+                               KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+               }
+               if (consider_buffer_cache_collect != NULL) {
+                       unsigned int orig_wire_count;
+
+                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+                       orig_wire_count = vm_page_wire_count;
+                       
+                       (void)(*consider_buffer_cache_collect)(1);
+                       consider_zone_gc(TRUE);
+
+                       HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
+
+                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
+               }
+       }
+       hibernate_cleaning_in_progress = FALSE;
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
+
+       if (retval && COMPRESSED_PAGER_IS_ACTIVE)
+               HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
+
+
+    HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
+                hibernate_stats.hibernate_considered,
+                hibernate_stats.hibernate_reentered_on_q,
+                hibernate_stats.hibernate_found_dirty);
+    HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
+                hibernate_stats.hibernate_skipped_cleaning,
+                hibernate_stats.hibernate_skipped_transient,
+                hibernate_stats.hibernate_skipped_precious,
+                hibernate_stats.hibernate_skipped_external,
+                hibernate_stats.hibernate_queue_nolock);
+    HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
+                hibernate_stats.hibernate_queue_paused,
+                hibernate_stats.hibernate_throttled,
+                hibernate_stats.hibernate_throttle_timeout,
+                hibernate_stats.hibernate_drained,
+                hibernate_stats.hibernate_drain_timeout);
+
+       return (retval);
+}
+
+
+static void
+hibernate_page_list_zero(hibernate_page_list_t *list)
+{
+    uint32_t             bank;
+    hibernate_bitmap_t * bitmap;
+
+    bitmap = &list->bank_bitmap[0];
+    for (bank = 0; bank < list->bank_count; bank++)
+    {
+        uint32_t last_bit;
+
+       bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); 
+        // set out-of-bound bits at end of bitmap.
+        last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
+       if (last_bit)
+           bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
+
+       bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
+    }
+}
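
hibernate_page_list_zero() sets the unused tail bits of each bank's final 32-bit word so pages past last_page can never read as free. Assuming the MSB-first page numbering used by the hibernate bitmaps, a bank with last_bit valid bits in its final word leaves the low 32 - last_bit positions out of range, and 0xFFFFFFFF >> last_bit is exactly that mask. A standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* a bank covering 70 pages: 70 & 31 == 6 valid bits in the last word */
        uint32_t page_count = 70;
        uint32_t last_bit   = page_count & 31;          /* 6 */
        uint32_t tail_mask  = 0xFFFFFFFF >> last_bit;   /* low 26 bits set */

        /* the 6 in-range pages (bits 31..26) stay clear; the rest are set */
        assert(last_bit == 6);
        assert((tail_mask & 0xFC000000) == 0);          /* top 6 bits clear */
        assert(tail_mask == 0x03FFFFFF);                /* 26 out-of-range bits */
        return 0;
}
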
+
+void
+hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
+{
+    uint32_t i;
+    vm_page_t m;
+    uint64_t start, end, timeout, nsec;
+    clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
+    clock_get_uptime(&start);
+
+    for (i = 0; i < gobble_count; i++)
+    {
+       while (VM_PAGE_NULL == (m = vm_page_grab()))
+       {
+           clock_get_uptime(&end);
+           if (end >= timeout)
+               break;
+           VM_PAGE_WAIT();
+       }
+       if (!m)
+           break;
+       m->busy = FALSE;
+       vm_page_gobble(m);
+
+       m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
+       hibernate_gobble_queue = m;
+    }
+
+    clock_get_uptime(&end);
+    absolutetime_to_nanoseconds(end - start, &nsec);
+    HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
+}
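
hibernate_gobble_pages() wraps vm_page_grab() in a deadline-bounded retry: keep waiting for a page until one appears or free_page_time expires. The same pattern in portable form (grab and the millisecond deadline are illustrative):

#include <stddef.h>
#include <time.h>

static void *
grab_with_deadline(void *(*grab)(void), unsigned timeout_ms)
{
        struct timespec now, deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec  += timeout_ms / 1000;
        deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {          /* normalize */
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        void *p;
        while ((p = grab()) == NULL) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec > deadline.tv_sec ||
                    (now.tv_sec == deadline.tv_sec &&
                     now.tv_nsec >= deadline.tv_nsec))
                        break;                          /* deadline passed */
                /* the kernel blocks in VM_PAGE_WAIT(); here we just retry */
        }
        return p;
}
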
+
+void
+hibernate_free_gobble_pages(void)
+{
+    vm_page_t m, next;
+    uint32_t  count = 0;
+
+    m = (vm_page_t) hibernate_gobble_queue;
+    while(m)
+    {
+        next = (vm_page_t) m->pageq.next;
+        vm_page_free(m);
+        count++;
+        m = next;
+    }
+    hibernate_gobble_queue = VM_PAGE_NULL;
+    
+    if (count)
+        HIBLOG("Freed %d pages\n", count);
+}
+
+static boolean_t 
+hibernate_consider_discard(vm_page_t m, boolean_t preflight)
+{
+    vm_object_t object = NULL;
+    int                  refmod_state;
+    boolean_t            discard = FALSE;
+
+    do
+    {
+        if (m->private)
+            panic("hibernate_consider_discard: private");
+
+        if (!vm_object_lock_try(m->object)) {
+           if (!preflight) hibernate_stats.cd_lock_failed++;
+            break;
+       }
+        object = m->object;
+
+       if (VM_PAGE_WIRED(m)) {
+           if (!preflight) hibernate_stats.cd_found_wired++;
+            break;
+       }
+        if (m->precious) {
+           if (!preflight) hibernate_stats.cd_found_precious++;
+            break;
+       }
+        if (m->busy || !object->alive) {
+           /*
+            *  Somebody is playing with this page.
+            */
+           if (!preflight) hibernate_stats.cd_found_busy++;
+            break;
+       }
+        if (m->absent || m->unusual || m->error) {
+           /*
+            * If it's unusual in any way, ignore it
+            */
+           if (!preflight) hibernate_stats.cd_found_unusual++;
+            break;
+       }
+        if (m->cleaning) {
+           if (!preflight) hibernate_stats.cd_found_cleaning++;
+            break;
+       }
+       if (m->laundry) {
+           if (!preflight) hibernate_stats.cd_found_laundry++;
+            break;
+       }
+        if (!m->dirty)
+        {
+            refmod_state = pmap_get_refmod(m->phys_page);
+        
+            if (refmod_state & VM_MEM_REFERENCED)
+                m->reference = TRUE;
+            if (refmod_state & VM_MEM_MODIFIED) {
+               SET_PAGE_DIRTY(m, FALSE);
+           }
+        }
+   
+        /*
+         * If it's clean or purgeable we can discard the page on wakeup.
+         */
+        discard = (!m->dirty) 
+                   || (VM_PURGABLE_VOLATILE == object->purgable)
+                   || (VM_PURGABLE_EMPTY    == object->purgable);
+
+
+        if (discard == FALSE) {
+               if (!preflight)
+                       hibernate_stats.cd_found_dirty++;
+        } else if (m->xpmapped && m->reference && !object->internal) {
+               if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+                       if (!preflight)
+                               hibernate_stats.cd_found_xpmapped++;
+                       discard = FALSE;
+               } else {
+                       if (!preflight)
+                               hibernate_stats.cd_skipped_xpmapped++;
+               }
+        }
+    }
+    while (FALSE);
+
+    if (object)
+        vm_object_unlock(object);
+
+    return (discard);
+}
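
The decision at the bottom of hibernate_consider_discard() reduces to: a page can be dropped from the image if it is clean or its object is volatile or empty purgeable, except that referenced xpmapped pages of external objects are kept, up to HIBERNATE_XPMAPPED_LIMIT, so the exception cannot blow the estimated image size. The predicate distilled over a stand-in struct (field names mirror the code):

#include <stdbool.h>

#define HIBERNATE_XPMAPPED_LIMIT 40000

struct pg {                     /* stand-in for the vm_page fields used */
        bool dirty, xpmapped, reference, internal_object;
        bool volatile_obj, empty_obj;
};

static int xpmapped_kept;       /* mirrors hibernate_stats.cd_found_xpmapped */

static bool
consider_discard(const struct pg *m)
{
        bool discard = !m->dirty || m->volatile_obj || m->empty_obj;

        /* keep hot xpmapped file pages in the image, up to the clamp */
        if (discard && m->xpmapped && m->reference && !m->internal_object) {
                if (xpmapped_kept < HIBERNATE_XPMAPPED_LIMIT) {
                        xpmapped_kept++;
                        discard = false;
                }
        }
        return discard;
}
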
+
+
+static void
+hibernate_discard_page(vm_page_t m)
+{
+    if (m->absent || m->unusual || m->error)
+       /*
+        * If it's unusual in any way, ignore it
+        */
+        return;
+
+#if MACH_ASSERT || DEBUG
+    vm_object_t object = m->object;
+    if (!vm_object_lock_try(m->object))
+       panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
+#else
+    /* No need to lock page queue for token delete, hibernate_vm_unlock() 
+       makes sure these locks are uncontended before sleep */
+#endif /* MACH_ASSERT || DEBUG */
+
+    if (m->pmapped == TRUE) 
+    {
+        __unused int refmod_state = pmap_disconnect(m->phys_page);
+    }
+
+    if (m->laundry)
+        panic("hibernate_discard_page(%p) laundry", m);
+    if (m->private)
+        panic("hibernate_discard_page(%p) private", m);
+    if (m->fictitious)
+        panic("hibernate_discard_page(%p) fictitious", m);
+
+    if (VM_PURGABLE_VOLATILE == m->object->purgable)
+    {
+       /* object should be on a queue */
+        assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL));
+        purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
         assert(old_queue);
-        /* No need to lock page queue for token delete, hibernate_vm_unlock() 
-           makes sure these locks are uncontended before sleep */
-        vm_purgeable_token_delete_first(old_queue);
+       if (m->object->purgeable_when_ripe) {
+               vm_purgeable_token_delete_first(old_queue);
+       }
         m->object->purgable = VM_PURGABLE_EMPTY;
+
+       /*
+        * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
+        * accounted in the "volatile" ledger, so no change here.
+        * We have to update vm_page_purgeable_count, though, since we're
+        * effectively purging this object.
+        */
+       unsigned int delta;
+       assert(m->object->resident_page_count >= m->object->wired_page_count);
+       delta = (m->object->resident_page_count - m->object->wired_page_count);
+       assert(vm_page_purgeable_count >= delta);
+       assert(delta > 0);
+       OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
     }
        
     vm_page_free(m);
+
+#if MACH_ASSERT || DEBUG
+    vm_object_unlock(object);
+#endif /* MACH_ASSERT || DEBUG */
+}
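+
+/*
+ * Worked example for the ledger delta above (illustrative numbers): an
+ * object with resident_page_count 12 and wired_page_count 2 yields
+ * delta == 10, so vm_page_purgeable_count drops by 10 when the object
+ * is marked VM_PURGABLE_EMPTY.
+ */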
+
+/*
+ * Grab locks for hibernate_page_list_setall()
+ */
+void
+hibernate_vm_lock_queues(void)
+{
+    vm_object_lock(compressor_object);
+    vm_page_lock_queues();
+    lck_mtx_lock(&vm_page_queue_free_lock);
+
+    if (vm_page_local_q) {
+       uint32_t  i;
+       for (i = 0; i < vm_page_local_q_count; i++) {
+           struct vpl  *lq;
+           lq = &vm_page_local_q[i].vpl_un.vpl;
+           VPL_LOCK(&lq->vpl_lock);
+       }
+    }
+}
+
+void
+hibernate_vm_unlock_queues(void)
+{
+    if (vm_page_local_q) {
+       uint32_t  i;
+       for (i = 0; i < vm_page_local_q_count; i++) {
+           struct vpl  *lq;
+           lq = &vm_page_local_q[i].vpl_un.vpl;
+           VPL_UNLOCK(&lq->vpl_lock);
+       }
+    }
+    lck_mtx_unlock(&vm_page_queue_free_lock);
+    vm_page_unlock_queues();
+    vm_object_unlock(compressor_object);
 }
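+
+/*
+ * Illustrative usage sketch (an assumption, not code from this change): a
+ * non-preflight scan would bracket its work with the two helpers above,
+ * which take and drop the locks in opposite orders (compressor object,
+ * then page queues, then the free lock, then the per-cpu local queues,
+ * and the reverse on unlock).
+ */
+#if 0
+static void
+hibernate_scan_under_queue_locks(void)
+{
+    hibernate_vm_lock_queues();
+    /* ... walk the page queues safely here ... */
+    hibernate_vm_unlock_queues();
+}
+#endif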
 
 /*
@@ -4432,46 +5685,110 @@ hibernate_discard_page(vm_page_t m)
 void
 hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
+                          hibernate_page_list_t * page_list_pal,
+                          boolean_t preflight, 
+                          boolean_t will_discard,
                           uint32_t * pagesOut)
 {
     uint64_t start, end, nsec;
     vm_page_t m;
+    vm_page_t next;
     uint32_t pages = page_list->page_count;
-    uint32_t count_zf = 0, count_throttled = 0;
-    uint32_t count_inactive = 0, count_active = 0, count_speculative = 0;
+    uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
+    uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
     uint32_t count_wire = pages;
     uint32_t count_discard_active    = 0;
     uint32_t count_discard_inactive  = 0;
+    uint32_t count_discard_cleaned   = 0;
     uint32_t count_discard_purgeable = 0;
     uint32_t count_discard_speculative = 0;
+    uint32_t count_discard_vm_struct_pages = 0;
     uint32_t i;
     uint32_t             bank;
     hibernate_bitmap_t * bitmap;
     hibernate_bitmap_t * bitmap_wired;
+    boolean_t                   discard_all;
+    boolean_t            discard;
+
+    HIBLOG("hibernate_page_list_setall(preflight %d) start %p, %p\n", preflight, page_list, page_list_wired);
+
+    if (preflight) {
+        page_list       = NULL;
+        page_list_wired = NULL;
+        page_list_pal   = NULL;
+               discard_all     = FALSE;
+    } else {
+               discard_all     = will_discard;
+    }
+
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+        vm_page_lock_queues();
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_LOCK(&lq->vpl_lock);
+           }
+       }
+    }
+#endif  /* MACH_ASSERT || DEBUG */
 
 
-    HIBLOG("hibernate_page_list_setall start\n");
+    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
 
     clock_get_uptime(&start);
 
-    hibernate_page_list_zero(page_list);
-    hibernate_page_list_zero(page_list_wired);
+    if (!preflight) {
+       hibernate_page_list_zero(page_list);
+       hibernate_page_list_zero(page_list_wired);
+       hibernate_page_list_zero(page_list_pal);
+    
+       hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
+       hibernate_stats.cd_pages = pages;
+    }
 
     if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++)
-                   vm_page_reactivate_local(i, TRUE, TRUE);
+                   vm_page_reactivate_local(i, TRUE, !preflight);
+    }
+
+    if (preflight) {
+       vm_object_lock(compressor_object);
+       vm_page_lock_queues();
+       lck_mtx_lock(&vm_page_queue_free_lock);
     }
 
     m = (vm_page_t) hibernate_gobble_queue;
-    while(m)
+    while (m)
     {
        pages--;
        count_wire--;
-       hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) {
+           hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       }
        m = (vm_page_t) m->pageq.next;
     }
 
+    if (!preflight) for( i = 0; i < real_ncpus; i++ )
+    {
+       if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
+       {
+           for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = (vm_page_t)m->pageq.next)
+           {
+               pages--;
+               count_wire--;
+               hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+               hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+               hibernate_stats.cd_local_free++;
+               hibernate_stats.cd_total_free++;
+           }
+       }
+    }
+
     for( i = 0; i < vm_colors; i++ )
     {
        queue_iterate(&vm_page_queue_free[i],
@@ -4481,8 +5798,12 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
        {
            pages--;
            count_wire--;
-           hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+           if (!preflight) {
+               hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+               hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    
+               hibernate_stats.cd_total_free++;
+           }
        }
     }
 
@@ -4493,131 +5814,231 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
     {
        pages--;
        count_wire--;
-       hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) {
+           hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    
+           hibernate_stats.cd_total_free++;
+       }
     }
 
-    queue_iterate( &vm_page_queue_throttled,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+    while (m && !queue_end(&vm_page_queue_throttled, (queue_entry_t)m))
     {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
             count_discard_inactive++;
+            discard = discard_all;
         }
         else
             count_throttled++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+        if (discard) hibernate_discard_page(m);
+       m = next;
     }
 
-    queue_iterate( &vm_page_queue_zf,
-                    m,
-                    vm_page_t,
-                   pageq )
+    m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+    while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_inactive++;
+            discard = discard_all;
+        }
+        else
+            count_anonymous++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+    while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_cleaned++;
+            discard = discard_all;
+        }
+        else
+            count_cleaned++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_active);
+    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
     {
-        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
            if (m->dirty)
                count_discard_purgeable++;
            else
-               count_discard_inactive++;
+               count_discard_active++;
+            discard = discard_all;
         }
         else
-            count_zf++;
+            count_active++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
     }
 
-    queue_iterate( &vm_page_queue_inactive,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
     {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
+            discard = discard_all;
         }
         else
             count_inactive++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
     }
 
     for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
     {
-       queue_iterate(&vm_page_queue_speculative[i].age_q,
-                     m,
-                     vm_page_t,
-                     pageq)
-       {
-           if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-            && hibernate_consider_discard(m))
-           {
-               hibernate_page_bitset(page_list, TRUE, m->phys_page);
-               count_discard_speculative++;
-           }
-           else
-               count_speculative++;
-           count_wire--;
-           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-       }
+       m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
+       while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+       {
+           next = (vm_page_t) m->pageq.next;
+           discard = FALSE;
+           if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+            && hibernate_consider_discard(m, preflight))
+           {
+               if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+               count_discard_speculative++;
+               discard = discard_all;
+           }
+           else
+               count_speculative++;
+           count_wire--;
+           if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+           if (discard)    hibernate_discard_page(m);
+           m = next;
+       }
     }
 
-    queue_iterate( &vm_page_queue_active,
-                    m,
-                    vm_page_t,
-                    pageq )
+    queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
     {
-        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
-        {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
-           if (m->dirty)
-               count_discard_purgeable++;
-           else
-               count_discard_active++;
-        }
-        else
-            count_active++;
+        count_compressor++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
     }
 
-    // pull wired from hibernate_bitmap
+    if (preflight == FALSE && discard_all == TRUE) {
+           KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START, 0, 0, 0, 0, 0);
 
-    bitmap = &page_list->bank_bitmap[0];
-    bitmap_wired = &page_list_wired->bank_bitmap[0];
-    for (bank = 0; bank < page_list->bank_count; bank++)
-    {
-       for (i = 0; i < bitmap->bitmapwords; i++)
-           bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
-       bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
-       bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+           HIBLOG("hibernate_teardown started\n");
+           count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
+           HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
+
+           pages -= count_discard_vm_struct_pages;
+           count_wire -= count_discard_vm_struct_pages;
+
+           hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
+
+           KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+    }
+
+    if (!preflight) {
+       // pull wired from hibernate_bitmap
+       bitmap = &page_list->bank_bitmap[0];
+       bitmap_wired = &page_list_wired->bank_bitmap[0];
+       for (bank = 0; bank < page_list->bank_count; bank++)
+       {
+           for (i = 0; i < bitmap->bitmapwords; i++)
+               bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
+           bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
+           bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+       }
     }
 
     // machine dependent adjustments
-    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);
+    hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
+
+    if (!preflight) {
+       hibernate_stats.cd_count_wire = count_wire;
+       hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
+               count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
+    }
 
     clock_get_uptime(&end);
     absolutetime_to_nanoseconds(end - start, &nsec);
     HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
 
-    HIBLOG("pages %d, wire %d, act %d, inact %d, spec %d, zf %d, throt %d, could discard act %d inact %d purgeable %d spec %d\n", 
-                pages, count_wire, count_active, count_inactive, count_speculative, count_zf, count_throttled,
-                count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
+    HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d\n", 
+          pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
+               discard_all ? "did" : "could",
+               count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+
+    if (hibernate_stats.cd_skipped_xpmapped)
+           HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
+    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+    if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_UNLOCK(&lq->vpl_lock);
+           }
+       }
+        vm_page_unlock_queues();
+    }
+#endif  /* MACH_ASSERT || DEBUG */
+
+    if (preflight) {
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+       vm_page_unlock_queues();
+       vm_object_unlock(compressor_object);
+    }
 
-    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative;
+    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
 }
 
 void
@@ -4630,12 +6051,25 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
     uint32_t  count_discard_active    = 0;
     uint32_t  count_discard_inactive  = 0;
     uint32_t  count_discard_purgeable = 0;
+    uint32_t  count_discard_cleaned   = 0;
     uint32_t  count_discard_speculative = 0;
 
+
+#if MACH_ASSERT || DEBUG
+        vm_page_lock_queues();
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_LOCK(&lq->vpl_lock);
+           }
+       }
+#endif  /* MACH_ASSERT || DEBUG */
+
     clock_get_uptime(&start);
 
-    m = (vm_page_t) queue_first(&vm_page_queue_zf);
-    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
+    m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+    while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
     {
         next = (vm_page_t) m->pageq.next;
         if (hibernate_page_bittst(page_list, m->phys_page))
@@ -4694,13 +6128,388 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
         m = next;
     }
 
+    m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+    while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+        if (hibernate_page_bittst(page_list, m->phys_page))
+        {
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_cleaned++;
+            hibernate_discard_page(m);
+        }
+        m = next;
+    }
+
+#if MACH_ASSERT || DEBUG
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_UNLOCK(&lq->vpl_lock);
+           }
+       }
+        vm_page_unlock_queues();
+#endif  /* MACH_ASSERT || DEBUG */
+
     clock_get_uptime(&end);
     absolutetime_to_nanoseconds(end - start, &nsec);
-    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d\n",
+    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
                 nsec / 1000000ULL,
-                count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
+               count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+}
+
+boolean_t       hibernate_paddr_map_inited = FALSE;
+boolean_t       hibernate_rebuild_needed = FALSE;
+unsigned int   hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t      hibernate_rebuild_hash_list = NULL;
+
+unsigned int   hibernate_teardown_found_tabled_pages = 0;
+unsigned int   hibernate_teardown_found_created_pages = 0;
+unsigned int   hibernate_teardown_found_free_pages = 0;
+unsigned int   hibernate_teardown_vm_page_free_count;
+
+
+struct ppnum_mapping {
+       struct ppnum_mapping    *ppnm_next;
+       ppnum_t                 ppnm_base_paddr;
+       unsigned int            ppnm_sindx;
+       unsigned int            ppnm_eindx;
+};
+
+struct ppnum_mapping   *ppnm_head;
+struct ppnum_mapping   *ppnm_last_found = NULL;
+
+
+void
+hibernate_create_paddr_map() 
+{
+       unsigned int    i;
+       ppnum_t         next_ppnum_in_run = 0;
+       struct ppnum_mapping *ppnm = NULL;
+
+       if (hibernate_paddr_map_inited == FALSE) {
+
+               for (i = 0; i < vm_pages_count; i++) {
+
+                       if (ppnm)
+                               ppnm->ppnm_eindx = i;
+
+                       if (ppnm == NULL || vm_pages[i].phys_page != next_ppnum_in_run) {
+
+                               ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+                               ppnm->ppnm_next = ppnm_head;
+                               ppnm_head = ppnm;
+
+                               ppnm->ppnm_sindx = i;
+                               ppnm->ppnm_base_paddr = vm_pages[i].phys_page;
+                       }
+                       next_ppnum_in_run = vm_pages[i].phys_page + 1;
+               }
+               ppnm->ppnm_eindx++;
+
+               hibernate_paddr_map_inited = TRUE;
+       }
+}
+
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+       struct ppnum_mapping *ppnm = NULL;
+       
+       ppnm = ppnm_last_found;
+
+       if (ppnm) {
+               if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+                       goto done;
+       }
+       for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+               if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+                       ppnm_last_found = ppnm;
+                       break;
+               }
+       }
+       if (ppnm == NULL)
+               panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+       return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
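+
+/*
+ * Worked example (illustrative, made-up numbers): if vm_pages[] covers two
+ * physically contiguous runs, say indices 0..9 at ppnum 0x100 and indices
+ * 10..19 at ppnum 0x800, the map holds two entries:
+ *
+ *     { ppnm_sindx 0,  ppnm_eindx 10, ppnm_base_paddr 0x100 }
+ *     { ppnm_sindx 10, ppnm_eindx 20, ppnm_base_paddr 0x800 }
+ *
+ * and hibernate_lookup_paddr(12) returns 0x800 + (12 - 10) == 0x802.
+ */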
+
+
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+       addr64_t        saddr_aligned;
+       addr64_t        eaddr_aligned;
+       addr64_t        addr;
+       ppnum_t         paddr;
+       unsigned int    mark_as_unneeded_pages = 0;
+
+       saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+       eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+       for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+               paddr = pmap_find_phys(kernel_pmap, addr);
+
+               assert(paddr);
+
+               hibernate_page_bitset(page_list,       TRUE, paddr);
+               hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+               mark_as_unneeded_pages++;
+       }
+       return (mark_as_unneeded_pages);
+}
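+
+/*
+ * Note the alignment directions above: saddr rounds up and eaddr rounds
+ * down, so only pages lying entirely inside [saddr, eaddr) get marked.
+ * With 4K pages (illustrative numbers), saddr 0x1010 / eaddr 0x4ff0
+ * align to 0x2000 / 0x4000 and mark exactly two pages.
+ */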
+
+
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+       vm_page_bucket_t *bucket;
+       int             hash_id;
+
+       assert(mem->hashed);
+       assert(mem->object);
+       assert(mem->offset != (vm_object_offset_t) -1);
+
+       /*
+        *      Insert it into the object/offset hash table
+        */
+       hash_id = vm_page_hash(mem->object, mem->offset);
+       bucket = &vm_page_buckets[hash_id];
+
+       mem->next_m = bucket->page_list;
+       bucket->page_list = VM_PAGE_PACK_PTR(mem);
+}
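+
+/*
+ * The insert above is a plain push-front onto the bucket's singly linked
+ * list; the only wrinkle is that the links are stored as packed pointers.
+ * Unpacked, it is just (hypothetical field names for illustration):
+ *
+ *     mem->next = bucket->head;
+ *     bucket->head = mem;
+ */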
+
+
+void
+hibernate_free_range(int sindx, int eindx)
+{
+       vm_page_t       mem;
+       unsigned int    color;
+
+       while (sindx < eindx) {
+               mem = &vm_pages[sindx];
+
+               vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+               mem->lopage = FALSE;
+               mem->free = TRUE;
+
+               color = mem->phys_page & vm_color_mask;
+               queue_enter_first(&vm_page_queue_free[color],
+                                 mem,
+                                 vm_page_t,
+                                 pageq);
+               vm_page_free_count++;
+
+               sindx++;
+       }
+}
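+
+/*
+ * Color selection above is a power-of-two modulus: with, say, 32 colors
+ * (illustrative), vm_color_mask is 0x1f and a page at ppnum 0x1234 lands
+ * on free queue color 0x1234 & 0x1f == 0x14.
+ */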
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
+void
+hibernate_rebuild_vm_structs(void)
+{
+       int             cindx, sindx, eindx;
+       vm_page_t       mem, tmem, mem_next;
+       AbsoluteTime    startTime, endTime;
+       uint64_t        nsec;
+
+       if (hibernate_rebuild_needed == FALSE)
+               return;
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+       HIBLOG("hibernate_rebuild started\n");
+
+       clock_get_uptime(&startTime);
+
+       hibernate_rebuild_pmap_structs();
+
+       bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
+       eindx = vm_pages_count;
+
+       for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+               
+               mem = &vm_pages[cindx];
+               /*
+                * hibernate_teardown_vm_structs leaves the location where
+                * this vm_page_t must be located in "next_m".
+                */
+               tmem = VM_PAGE_UNPACK_PTR(mem->next_m);
+               mem->next_m = VM_PAGE_PACK_PTR(NULL);
+
+               sindx = (int)(tmem - &vm_pages[0]);
+
+               if (mem != tmem) {
+                       /*
+                        * this vm_page_t was moved by hibernate_teardown_vm_structs,
+                        * so move it back to its real location
+                        */
+                       *tmem = *mem;
+                       mem = tmem;
+               }
+               if (mem->hashed)
+                       hibernate_hash_insert_page(mem);
+               /*
+                * the 'hole' between this vm_page_t and the previous
+                * vm_page_t we moved needs to be initialized as 
+                * a range of free vm_page_t's
+                */
+               hibernate_free_range(sindx + 1, eindx);
+
+               eindx = sindx;
+       }
+       if (sindx)
+               hibernate_free_range(0, sindx);
+
+       assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
+
+       /*
+        * process the list of vm_page_t's that were entered in the hash,
+        * but were not located in the vm_pages array... these are 
+        * vm_page_t's that were created on the fly (i.e. fictitious)
+        */
+       for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
+               mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
+
+               mem->next_m = VM_PAGE_PACK_PTR(NULL);
+               hibernate_hash_insert_page(mem);
+       }
+       hibernate_rebuild_hash_list = NULL;
+
+        clock_get_uptime(&endTime);
+        SUB_ABSOLUTETIME(&endTime, &startTime);
+        absolutetime_to_nanoseconds(endTime, &nsec);
+
+       HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
+
+       hibernate_rebuild_needed = FALSE;
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+}
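+
+/*
+ * Worked example of the rebuild walk above (illustrative numbers): with
+ * vm_pages_count 10 and last_valid_compact_indx 3, suppose the compacted
+ * entries at indices 3, 2, 1, 0 originally lived at 7, 4, 2, 0.  The loop
+ * copies index 3 back to 7 and frees 8..9, copies 2 back to 4 and frees
+ * 5..6, copies 1 back to 2 and frees 3, then leaves 0 in place and frees 1.
+ */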
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
+uint32_t
+hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+       unsigned int    i;
+       unsigned int    compact_target_indx;
+       vm_page_t       mem, mem_next;
+       vm_page_bucket_t *bucket;
+       unsigned int    mark_as_unneeded_pages = 0;
+       unsigned int    unneeded_vm_page_bucket_pages = 0;
+       unsigned int    unneeded_vm_pages_pages = 0;
+       unsigned int    unneeded_pmap_pages = 0;
+       addr64_t        start_of_unneeded = 0;
+       addr64_t        end_of_unneeded = 0;
+
+       
+       if (hibernate_should_abort())
+               return (0);
+
+       HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
+              vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
+              vm_page_cleaned_count, compressor_object->resident_page_count);
+
+       for (i = 0; i < vm_page_bucket_count; i++) {
+
+               bucket = &vm_page_buckets[i];
+
+               for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = mem_next) {
+                       assert(mem->hashed);
+
+                       mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
+
+                       if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
+                               mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
+                               hibernate_rebuild_hash_list = mem;
+                       }
+               }
+       }
+       unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
+       mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
+
+       hibernate_teardown_vm_page_free_count = vm_page_free_count;
+
+       compact_target_indx = 0;
+
+       for (i = 0; i < vm_pages_count; i++) {
+
+               mem = &vm_pages[i];
+
+               if (mem->free) {
+                       unsigned int color;
+
+                       assert(mem->busy);
+                       assert(!mem->lopage);
+
+                       color = mem->phys_page & vm_color_mask;
+
+                       queue_remove(&vm_page_queue_free[color],
+                                    mem,
+                                    vm_page_t,
+                                    pageq);
+                       mem->pageq.next = NULL;
+                       mem->pageq.prev = NULL;
+
+                       vm_page_free_count--;
+
+                       hibernate_teardown_found_free_pages++;
+
+                       if ( !vm_pages[compact_target_indx].free)
+                               compact_target_indx = i;
+               } else {
+                       /*
+                        * record this vm_page_t's original location;
+                        * we need this even if it doesn't get moved,
+                        * as an indicator to the rebuild function that
+                        * it doesn't have to be moved back
+                        */
+                       mem->next_m = VM_PAGE_PACK_PTR(mem);
+
+                       if (vm_pages[compact_target_indx].free) {
+                               /*
+                                * we've got a hole to fill, so
+                                * move this vm_page_t to its new home
+                                */
+                               vm_pages[compact_target_indx] = *mem;
+                               mem->free = TRUE;
+
+                               hibernate_teardown_last_valid_compact_indx = compact_target_indx;
+                               compact_target_indx++;
+                       } else
+                               hibernate_teardown_last_valid_compact_indx = i;
+               }
+       }
+       unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
+                                                            (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
+       mark_as_unneeded_pages += unneeded_vm_pages_pages;
+
+       hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
+
+       if (start_of_unneeded) {
+               unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
+               mark_as_unneeded_pages += unneeded_pmap_pages;
+       }
+       HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
+
+       hibernate_rebuild_needed = TRUE;
+
+       return (mark_as_unneeded_pages);
 }
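+
+/*
+ * Aside (illustrative sketch of the compaction loop above, using a plain
+ * array of ints where nonzero means "in use"; a hypothetical helper, not
+ * kernel code): in-use entries slide down into the lowest hole, and the
+ * index of the last valid compacted entry is what the rebuild pass keys
+ * off of.
+ */
+#if 0
+static unsigned int
+compact_in_place(int *slots, unsigned int count)
+{
+       unsigned int i, target = 0, last_valid = 0;
+
+       for (i = 0; i < count; i++) {
+               if (slots[i] == 0) {            /* a free slot */
+                       if (slots[target] != 0)
+                               target = i;     /* lowest hole seen so far */
+               } else if (slots[target] == 0) {        /* hole below us: fill it */
+                       slots[target] = slots[i];
+                       slots[i] = 0;
+                       last_valid = target++;
+               } else
+                       last_valid = i;         /* already packed in place */
+       }
+       return (last_valid);    /* analogous to ..._last_valid_compact_indx */
+}
+#endif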
 
+
 #endif /* HIBERNATION */
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -4741,7 +6550,7 @@ vm_page_info(
                bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
                lck_spin_lock(bucket_lock);
 
-               for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+               for (m = VM_PAGE_UNPACK_PTR(bucket->page_list); m != VM_PAGE_NULL; m = VM_PAGE_UNPACK_PTR(m->next_m))
                        bucket_count++;
 
                lck_spin_unlock(bucket_lock);
@@ -4754,63 +6563,75 @@ vm_page_info(
 }
 #endif /* MACH_VM_DEBUG */
 
-#include <mach_kdb.h>
-#if    MACH_KDB
-
-#include <ddb/db_output.h>
-#include <vm/vm_print.h>
-#define        printf  kdbprintf
-
-/*
- *     Routine:        vm_page_print [exported]
- */
+#if VM_PAGE_BUCKETS_CHECK
 void
-vm_page_print(
-       db_addr_t       db_addr)
+vm_page_buckets_check(void)
 {
-       vm_page_t       p;
+       unsigned int i;
+       vm_page_t p;
+       unsigned int p_hash;
+       vm_page_bucket_t *bucket;
+       lck_spin_t      *bucket_lock;
+
+       if (!vm_page_buckets_check_ready) {
+               return;
+       }
 
-       p = (vm_page_t) (long) db_addr;
+#if HIBERNATION
+       if (hibernate_rebuild_needed ||
+           hibernate_rebuild_hash_list) {
+               panic("BUCKET_CHECK: hibernation in progress: "
+                     "rebuild_needed=%d rebuild_hash_list=%p\n",
+                     hibernate_rebuild_needed,
+                     hibernate_rebuild_hash_list);
+       }
+#endif /* HIBERNATION */
 
-       iprintf("page 0x%x\n", p);
+#if VM_PAGE_FAKE_BUCKETS
+       char *cp;
+       for (cp = (char *) vm_page_fake_buckets_start;
+            cp < (char *) vm_page_fake_buckets_end;
+            cp++) {
+               if (*cp != 0x5a) {
+                       panic("BUCKET_CHECK: corruption at %p in fake buckets "
+                             "[0x%llx:0x%llx]\n",
+                             cp,
+                             (uint64_t) vm_page_fake_buckets_start,
+                             (uint64_t) vm_page_fake_buckets_end);
+               }
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
 
-       db_indent += 2;
+       for (i = 0; i < vm_page_bucket_count; i++) {
+               bucket = &vm_page_buckets[i];
+               if (!bucket->page_list) {
+                       continue;
+               }
 
-       iprintf("object=0x%x", p->object);
-       printf(", offset=0x%x", p->offset);
-       printf(", wire_count=%d", p->wire_count);
+               bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+               lck_spin_lock(bucket_lock);
+               p = VM_PAGE_UNPACK_PTR(bucket->page_list);
+               while (p != VM_PAGE_NULL) {
+                       p_hash = vm_page_hash(p->object, p->offset);
+                       if (!p->hashed) {
+                               panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+                                     "hash %d in bucket %d at %p "
+                                     "is not hashed\n",
+                                     p, p->object, p->offset,
+                                     p_hash, i, bucket);
+                       }
+                       if (p_hash != i) {
+                               panic("BUCKET_CHECK: corruption in bucket %d "
+                                     "at %p: page %p object %p offset 0x%llx "
+                                     "hash %d\n",
+                                     i, bucket, p, p->object, p->offset,
+                                     p_hash);
+                       }
+                       p = VM_PAGE_UNPACK_PTR(p->next_m);
+               }
+               lck_spin_unlock(bucket_lock);
+       }
 
-       iprintf("%slocal, %sinactive, %sactive, %sthrottled, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
-               (p->local ? "" : "!"),
-               (p->inactive ? "" : "!"),
-               (p->active ? "" : "!"),
-               (p->throttled ? "" : "!"),
-               (p->gobbled ? "" : "!"),
-               (p->laundry ? "" : "!"),
-               (p->free ? "" : "!"),
-               (p->reference ? "" : "!"),
-               (p->encrypted ? "" : "!"));
-       iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
-               (p->busy ? "" : "!"),
-               (p->wanted ? "" : "!"),
-               (p->tabled ? "" : "!"),
-               (p->fictitious ? "" : "!"),
-               (p->private ? "" : "!"),
-               (p->precious ? "" : "!"));
-       iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
-               (p->absent ? "" : "!"),
-               (p->error ? "" : "!"),
-               (p->dirty ? "" : "!"),
-               (p->cleaning ? "" : "!"),
-               (p->pageout ? "" : "!"),
-               (p->clustered ? "" : "!"));
-       iprintf("%soverwriting, %srestart, %sunusual\n",
-               (p->overwriting ? "" : "!"),
-               (p->restart ? "" : "!"),
-               (p->unusual ? "" : "!"));
-
-       iprintf("phys_page=0x%x", p->phys_page);
-
-       db_indent -= 2;
+//     printf("BUCKET_CHECK: checked buckets\n");
 }
-#endif /* MACH_KDB */
+#endif /* VM_PAGE_BUCKETS_CHECK */