apple/xnu.git: osfmk/vm/vm_resident.c (xnu-4570.71.2)
index a548c0600eed6b49427c01f8d072b451a7f0bf0c..b34f1b2d505715f45977528c534b303631cb69b1 100644
@@ -64,6 +64,7 @@
 
 #include <debug.h>
 #include <libkern/OSAtomic.h>
+#include <libkern/OSDebug.h>
 
 #include <mach/clock_types.h>
 #include <mach/vm_prot.h>
 #include <mach/sdt.h>
 #include <kern/counters.h>
 #include <kern/sched_prim.h>
+#include <kern/policy_internal.h>
 #include <kern/task.h>
 #include <kern/thread.h>
 #include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/xpr.h>
+#include <kern/ledger.h>
 #include <vm/pmap.h>
 #include <vm/vm_init.h>
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>                        /* kernel_memory_allocate() */
 #include <kern/misc_protos.h>
 #include <zone_debug.h>
+#include <mach_debug/zone_info.h>
 #include <vm/cpm.h>
 #include <pexpert/pexpert.h>
+#include <san/kasan.h>
 
 #include <vm/vm_protos.h>
 #include <vm/memory_object.h>
 #include <vm/vm_purgeable_internal.h>
+#include <vm/vm_compressor.h>
+
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
+#endif
 
 #include <IOKit/IOHibernatePrivate.h>
 
 #include <sys/kdebug.h>
 
+
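+/*
+ * Lookup tables indexed by a page's vm_page_q_state: a nonzero entry
+ * means that state belongs to the named class of queues.  These are
+ * filled in by vm_page_bootstrap() below.
+ */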
+char   vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+char   vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+char   vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+char   vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+
+#if CONFIG_SECLUDED_MEMORY
+struct vm_page_secluded_data vm_page_secluded;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
 boolean_t      hibernate_cleaning_in_progress = FALSE;
 boolean_t      vm_page_free_verify = TRUE;
 
@@ -118,8 +138,11 @@ __private_extern__ void            vm_page_init_lck_grp(void);
 static void            vm_page_free_prepare(vm_page_t  page);
 static vm_page_t       vm_page_grab_fictitious_common(ppnum_t phys_addr);
 
+static void vm_tag_init(void);
 
-
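+/*
+ * Pointer-packing parameters mirrored into globals, apparently so they
+ * can be read by external tools such as debugger macros; the values
+ * come from the VM_PACKED_* configuration.
+ */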
+uint64_t       vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+uint32_t       vm_packed_from_vm_pages_array_mask = VM_PACKED_FROM_VM_PAGES_ARRAY;
+uint32_t       vm_packed_pointer_shift = VM_PACKED_POINTER_SHIFT;
 
 /*
  *     Associated with page of user-allocatable memory is a
@@ -145,7 +168,7 @@ uint32_t    vm_page_pages;
  *     or VP, table.]
  */
 typedef struct {
-       vm_page_t       pages;
+       vm_page_packed_t page_list;
 #if    MACH_PAGE_HASH_STATS
        int             cur_count;              /* current count */
        int             hi_count;               /* high water mark */
@@ -162,7 +185,34 @@ unsigned int       vm_page_hash_shift;             /* Shift for hash function */
 uint32_t       vm_page_bucket_hash;            /* Basic bucket hash */
 unsigned int   vm_page_bucket_lock_count = 0;          /* How big is array of locks? */
 
+#ifndef VM_TAG_ACTIVE_UPDATE
+#error VM_TAG_ACTIVE_UPDATE
+#endif
+#ifndef VM_MAX_TAG_ZONES
+#error VM_MAX_TAG_ZONES
+#endif
+
+boolean_t   vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
 lck_spin_t     *vm_page_bucket_locks;
+lck_spin_t     vm_objects_wired_lock;
+lck_spin_t     vm_allocation_sites_lock;
+
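+/*
+ * The static allocation sites cover the fixed VM_KERN_MEMORY_* tags
+ * (initialized in vm_page_bootstrap() below); dynamically registered
+ * sites fill the rest of vm_allocation_sites[].
+ */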
+vm_allocation_site_t            vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
+vm_allocation_site_t *          vm_allocation_sites[VM_MAX_TAG_VALUE];
+#if VM_MAX_TAG_ZONES
+vm_allocation_zone_total_t **   vm_allocation_zone_totals;
+#endif /* VM_MAX_TAG_ZONES */
+
+vm_tag_t vm_allocation_tag_highest;
+
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets;        /* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
 
 
 #if    MACH_PAGE_HASH_STATS
@@ -211,9 +261,15 @@ hash_debug(void)
  *     module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
  *     constants.
  */
+#if defined(__arm__) || defined(__arm64__)
+vm_size_t      page_size;
+vm_size_t      page_mask;
+int            page_shift;
+#else
 vm_size_t      page_size  = PAGE_SIZE;
 vm_size_t      page_mask  = PAGE_MASK;
 int            page_shift = PAGE_SHIFT;
+#endif
 
 /*
  *     Resident page structures are initialized from
@@ -226,6 +282,10 @@ int                page_shift = PAGE_SHIFT;
 struct vm_page vm_page_template;
 
 vm_page_t      vm_pages = VM_PAGE_NULL;
+vm_page_t      vm_page_array_beginning_addr;
+vm_page_t      vm_page_array_ending_addr;
+vm_page_t      vm_page_array_boundary;
+
 unsigned int   vm_pages_count = 0;
 ppnum_t                vm_page_lowest = 0;
 
@@ -237,13 +297,22 @@ ppnum_t           vm_page_lowest = 0;
 unsigned int   vm_colors;
 unsigned int    vm_color_mask;                 /* mask is == (vm_colors-1) */
 unsigned int   vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
-queue_head_t   vm_page_queue_free[MAX_COLORS];
+unsigned int   vm_free_magazine_refill_limit = 0;
+
+
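+/*
+ * Free-queue heads must honor VM_PACKED_POINTER_ALIGNMENT because
+ * VM_PAGE_PACK_PTR discards the pointer's low-order bits (see the
+ * alignment note in pmap_startup()).
+ */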
+struct vm_page_queue_free_head {
+       vm_page_queue_head_t    qhead;
+} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+
+struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS];
+
+
 unsigned int   vm_page_free_wanted;
 unsigned int   vm_page_free_wanted_privileged;
+#if CONFIG_SECLUDED_MEMORY
+unsigned int   vm_page_free_wanted_secluded;
+#endif /* CONFIG_SECLUDED_MEMORY */
 unsigned int   vm_page_free_count;
-unsigned int   vm_page_fictitious_count;
-
-unsigned int   vm_page_free_count_minimum;     /* debugging */
 
 /*
  *     Occasionally, the virtual memory system uses
@@ -254,6 +323,7 @@ unsigned int        vm_page_free_count_minimum;     /* debugging */
  *     These page structures are allocated the way
  *     most other kernel structures are.
  */
+zone_t vm_page_array_zone;
 zone_t vm_page_zone;
 vm_locks_array_t vm_page_locks;
 decl_lck_mtx_data(,vm_page_alloc_lock)
@@ -275,7 +345,7 @@ struct vplq     *vm_page_local_q = NULL;
  *     For debugging, this should be a strange value
  *     that the pmap module can recognize in assertions.
  */
-ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
+const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
 
 /*
  *     Guard pages are not accessible so they don't
@@ -285,7 +355,7 @@ ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
  *     we don't use a real physical page with that
  *     physical address.
  */
-ppnum_t vm_page_guard_addr = (ppnum_t) -2;
+const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
 
 /*
  *     Resident page structures are also chained on
@@ -293,37 +363,71 @@ ppnum_t vm_page_guard_addr = (ppnum_t) -2;
  *     system (pageout daemon).  These queues are
  *     defined here, but are shared by the pageout
  *     module.  The inactive queue is broken into 
- *     inactive and zf for convenience as the 
+ *     file backed and anonymous for convenience as the 
 *     pageout daemon often assigns a higher 
- *     affinity to zf pages
+ *     importance to anonymous pages (they are less likely to be picked)
  */
-queue_head_t   vm_page_queue_active;
-queue_head_t   vm_page_queue_inactive;
-queue_head_t   vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
-queue_head_t   vm_page_queue_throttled;
+vm_page_queue_head_t   vm_page_queue_active __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+vm_page_queue_head_t   vm_page_queue_inactive __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+#if CONFIG_SECLUDED_MEMORY
+vm_page_queue_head_t   vm_page_queue_secluded __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+#endif /* CONFIG_SECLUDED_MEMORY */
+vm_page_queue_head_t   vm_page_queue_anonymous __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));  /* inactive memory queue for anonymous pages */
+vm_page_queue_head_t   vm_page_queue_throttled __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+
+queue_head_t   vm_objects_wired;
+
+#if CONFIG_BACKGROUND_QUEUE
+vm_page_queue_head_t   vm_page_queue_background __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+uint32_t       vm_page_background_target;
+uint32_t       vm_page_background_count;
+uint64_t       vm_page_background_promoted_count;
+
+uint32_t       vm_page_background_internal_count;
+uint32_t       vm_page_background_external_count;
+
+uint32_t       vm_page_background_mode;
+uint32_t       vm_page_background_exclude_external;
+#endif
 
 unsigned int   vm_page_active_count;
 unsigned int   vm_page_inactive_count;
+#if CONFIG_SECLUDED_MEMORY
+unsigned int   vm_page_secluded_count;
+unsigned int   vm_page_secluded_count_free;
+unsigned int   vm_page_secluded_count_inuse;
+#endif /* CONFIG_SECLUDED_MEMORY */
 unsigned int   vm_page_anonymous_count;
 unsigned int   vm_page_throttled_count;
 unsigned int   vm_page_speculative_count;
+
 unsigned int   vm_page_wire_count;
+unsigned int   vm_page_wire_count_on_boot = 0;
+unsigned int   vm_page_stolen_count;
 unsigned int   vm_page_wire_count_initial;
+unsigned int   vm_page_pages_initial;
 unsigned int   vm_page_gobble_count = 0;
-unsigned int   vm_page_wire_count_warning = 0;
-unsigned int   vm_page_gobble_count_warning = 0;
+
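+/* compile-time warning thresholds; 0 disables the checks */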
+#define        VM_PAGE_WIRE_COUNT_WARNING      0
+#define VM_PAGE_GOBBLE_COUNT_WARNING   0
 
 unsigned int   vm_page_purgeable_count = 0; /* # of pages purgeable now */
 unsigned int   vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
 uint64_t       vm_page_purged_count = 0;    /* total count of purged pages */
 
+unsigned int   vm_page_xpmapped_external_count = 0;
+unsigned int   vm_page_external_count = 0;
+unsigned int   vm_page_internal_count = 0;
+unsigned int   vm_page_pageable_external_count = 0;
+unsigned int   vm_page_pageable_internal_count = 0;
+
 #if DEVELOPMENT || DEBUG
 unsigned int   vm_page_speculative_recreated = 0;
 unsigned int   vm_page_speculative_created = 0;
 unsigned int   vm_page_speculative_used = 0;
 #endif
 
-queue_head_t    vm_page_queue_cleaned;
+vm_page_queue_head_t    vm_page_queue_cleaned __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
 
 unsigned int   vm_page_cleaned_count = 0;
 unsigned int   vm_pageout_enqueued_cleaned = 0;
@@ -341,9 +445,11 @@ ppnum_t            max_valid_low_ppnum = 0xffffffff;
 unsigned int   vm_page_free_target = 0;
 unsigned int   vm_page_free_min = 0;
 unsigned int   vm_page_throttle_limit = 0;
-uint32_t       vm_page_creation_throttle = 0;
 unsigned int   vm_page_inactive_target = 0;
-unsigned int   vm_page_anonymous_min = 0;
+#if CONFIG_SECLUDED_MEMORY
+unsigned int   vm_page_secluded_target = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
+unsigned int   vm_page_anonymous_min = 0;
 unsigned int   vm_page_inactive_min = 0;
 unsigned int   vm_page_free_reserved = 0;
 unsigned int   vm_page_throttle_count = 0;
@@ -373,7 +479,9 @@ struct vm_page_stats_reusable vm_page_stats_reusable;
 void
 vm_set_page_size(void)
 {
-       page_mask = page_size - 1;
+       page_size  = PAGE_SIZE;
+       page_mask  = PAGE_MASK;
+       page_shift = PAGE_SHIFT;
 
        if ((page_mask & page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
@@ -383,6 +491,50 @@ vm_set_page_size(void)
                        break;
 }
 
+#if defined (__x86_64__)
+
+#define MAX_CLUMP_SIZE      16
+#define DEFAULT_CLUMP_SIZE  4
+
+unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
+
+#if DEVELOPMENT || DEBUG
+unsigned long vm_clump_stats[MAX_CLUMP_SIZE+1];
+unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
+
+static inline void vm_clump_update_stats(unsigned int c) {
+    assert(c<=vm_clump_size);
+    if(c>0 && c<=vm_clump_size) vm_clump_stats[c]+=c;
+    vm_clump_allocs+=c;
+}
+#endif  /*  if DEVELOPMENT || DEBUG */
+
+/* Called once to setup the VM clump knobs */
+static void
+vm_page_setup_clump( void )
+{
+    unsigned int override, n;
+
+    vm_clump_size = DEFAULT_CLUMP_SIZE;
+    if ( PE_parse_boot_argn("clump_size", &override, sizeof (override)) ) vm_clump_size = override;
+
+    if(vm_clump_size > MAX_CLUMP_SIZE) panic("vm_page_setup_clump:: clump_size is too large!");
+    if(vm_clump_size < 1) panic("vm_page_setup_clump:: clump_size must be >= 1");
+    if((vm_clump_size & (vm_clump_size-1)) != 0)  panic("vm_page_setup_clump:: clump_size must be a power of 2");
+
+    vm_clump_promote_threshold = vm_clump_size;
+    vm_clump_mask = vm_clump_size - 1;
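+    /* vm_clump_shift = log2(vm_clump_size) */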
+    for(vm_clump_shift=0, n=vm_clump_size; n>1; n>>=1, vm_clump_shift++);
+
+#if DEVELOPMENT || DEBUG
+    bzero(vm_clump_stats, sizeof(vm_clump_stats));
+    vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
+#endif  /*  if DEVELOPMENT || DEBUG */
+}
+
+#endif /* #if defined (__x86_64__) */
+
+#define COLOR_GROUPS_TO_STEAL  4
 
 /* Called once during startup, once the cache geometry is known.
  */
@@ -390,7 +542,11 @@ static void
 vm_page_set_colors( void )
 {
        unsigned int    n, override;
-       
+
+#if defined (__x86_64__)       
+       /* adjust #colors because we need to color outside the clump boundary */
+       vm_cache_geometry_colors >>= vm_clump_shift;
+#endif
        if ( PE_parse_boot_argn("colors", &override, sizeof (override)) )               /* colors specified as a boot-arg? */
                n = override;   
        else if ( vm_cache_geometry_colors )                    /* do we know what the cache geometry is? */
@@ -404,10 +560,18 @@ vm_page_set_colors( void )
                
        /* the count must be a power of 2  */
        if ( ( n & (n - 1)) != 0  )
-               panic("vm_page_set_colors");
+               n = DEFAULT_COLORS;                             /* use default if all else fails */
        
        vm_colors = n;
        vm_color_mask = n - 1;
+
+       vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
+
+#if defined (__x86_64__)
+        /* adjust for reduction in colors due to clumping and multiple cores */
+       if (real_ncpus)
+               vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
+#endif
 }
 
 
@@ -436,8 +600,12 @@ vm_page_init_lck_grp(void)
        lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr);
        lck_attr_setdefault(&vm_page_lck_attr);
        lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
+
+       vm_compressor_init_locks();
 }
 
+#define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
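+/* e.g. ROUNDUP_NEXTP2(48) == 64; X must be > 1, since __builtin_clz(0) is undefined */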
+
 void
 vm_page_init_local_q()
 {
@@ -451,15 +619,25 @@ vm_page_init_local_q()
         * no point in this for a uni-processor system
         */
        if (num_cpus >= 2) {
-               t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq));
+#if KASAN
+               /* KASAN breaks the expectation of a size-aligned object by adding a
+                * redzone, so explicitly align. */
+               t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq) + VM_PACKED_POINTER_ALIGNMENT);
+               t_local_q = (void *)(((uintptr_t)t_local_q + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT-1));
+#else
+               /* round the size up to the nearest power of two */
+               t_local_q = (struct vplq *)kalloc(ROUNDUP_NEXTP2(num_cpus * sizeof(struct vplq)));
+#endif
 
                for (i = 0; i < num_cpus; i++) {
                        struct vpl      *lq;
 
                        lq = &t_local_q[i].vpl_un.vpl;
                        VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
-                       queue_init(&lq->vpl_queue);
+                       vm_page_queue_init(&lq->vpl_queue);
                        lq->vpl_count = 0;
+                       lq->vpl_internal_count = 0;
+                       lq->vpl_external_count = 0;
                }
                vm_page_local_q_count = num_cpus;
 
@@ -467,6 +645,17 @@ vm_page_init_local_q()
        }
 }
 
+/*
+ * vm_init_before_launchd
+ *
+ * This should be called right before launchd is loaded.
+ */
+void
+vm_init_before_launchd()
+{
+       vm_page_wire_count_on_boot = vm_page_wire_count;
+}
+
 
 /*
  *     vm_page_bootstrap:
@@ -484,7 +673,7 @@ vm_page_bootstrap(
        vm_offset_t             *startp,
        vm_offset_t             *endp)
 {
-       register vm_page_t      m;
+       vm_page_t               m;
        unsigned int            i;
        unsigned int            log1;
        unsigned int            log2;
@@ -497,38 +686,40 @@ vm_page_bootstrap(
        m = &vm_page_template;
        bzero(m, sizeof (*m));
 
-       m->pageq.next = NULL;
-       m->pageq.prev = NULL;
-       m->listq.next = NULL;
-       m->listq.prev = NULL;
-       m->next = VM_PAGE_NULL;
+#if CONFIG_BACKGROUND_QUEUE
+       m->vm_page_backgroundq.next = 0;
+       m->vm_page_backgroundq.prev = 0;
+       m->vm_page_in_background = FALSE;
+       m->vm_page_on_backgroundq = FALSE;
+#endif
+
+       VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+       m->listq.next = 0;
+       m->listq.prev = 0;
+       m->next_m = 0;
 
-       m->object = VM_OBJECT_NULL;             /* reset later */
+       m->vm_page_object = 0;                  /* reset later */
        m->offset = (vm_object_offset_t) -1;    /* reset later */
 
        m->wire_count = 0;
-       m->local = FALSE;
-       m->inactive = FALSE;
-       m->active = FALSE;
-       m->pageout_queue = FALSE;
-       m->speculative = FALSE;
+       m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
        m->laundry = FALSE;
-       m->free = FALSE;
        m->reference = FALSE;
        m->gobbled = FALSE;
        m->private = FALSE;
-       m->throttled = FALSE;
        m->__unused_pageq_bits = 0;
 
-       m->phys_page = 0;               /* reset later */
-
+#if    !defined(__arm__) && !defined(__arm64__)
+       VM_PAGE_SET_PHYS_PAGE(m, 0);            /* reset later */
+#endif
        m->busy = TRUE;
        m->wanted = FALSE;
        m->tabled = FALSE;
+       m->hashed = FALSE;
        m->fictitious = FALSE;
        m->pmapped = FALSE;
        m->wpmapped = FALSE;
-       m->pageout = FALSE;
+       m->free_when_done = FALSE;
        m->absent = FALSE;
        m->error = FALSE;
        m->dirty = FALSE;
@@ -538,17 +729,16 @@ vm_page_bootstrap(
        m->overwriting = FALSE;
        m->restart = FALSE;
        m->unusual = FALSE;
-       m->encrypted = FALSE;
-       m->encrypted_cleaning = FALSE;
        m->cs_validated = FALSE;
        m->cs_tainted = FALSE;
+       m->cs_nx = FALSE;
        m->no_cache = FALSE;
        m->reusable = FALSE;
        m->slid = FALSE;
-       m->was_dirty = FALSE;
+       m->xpmapped = FALSE;
+       m->written_by_kernel = FALSE;
        m->__unused_object_bits = 0;
 
-
        /*
         *      Initialize the page queues.
         */
@@ -573,33 +763,117 @@ vm_page_bootstrap(
                purgeable_queues[i].debug_count_objects = 0;
 #endif
        };
+       purgeable_nonvolatile_count = 0;
+       queue_init(&purgeable_nonvolatile_queue);
     
        for (i = 0; i < MAX_COLORS; i++ )
-               queue_init(&vm_page_queue_free[i]);
-
-       queue_init(&vm_lopage_queue_free);
-       queue_init(&vm_page_queue_active);
-       queue_init(&vm_page_queue_inactive);
-       queue_init(&vm_page_queue_cleaned);
-       queue_init(&vm_page_queue_throttled);
-       queue_init(&vm_page_queue_anonymous);
+               vm_page_queue_init(&vm_page_queue_free[i].qhead);
+
+       vm_page_queue_init(&vm_lopage_queue_free);
+       vm_page_queue_init(&vm_page_queue_active);
+       vm_page_queue_init(&vm_page_queue_inactive);
+#if CONFIG_SECLUDED_MEMORY
+       vm_page_queue_init(&vm_page_queue_secluded);
+#endif /* CONFIG_SECLUDED_MEMORY */
+       vm_page_queue_init(&vm_page_queue_cleaned);
+       vm_page_queue_init(&vm_page_queue_throttled);
+       vm_page_queue_init(&vm_page_queue_anonymous);
+       queue_init(&vm_objects_wired);
 
        for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
-               queue_init(&vm_page_queue_speculative[i].age_q);
+               vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
 
                vm_page_queue_speculative[i].age_ts.tv_sec = 0;
                vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
        }
+#if CONFIG_BACKGROUND_QUEUE
+       vm_page_queue_init(&vm_page_queue_background);
+
+       vm_page_background_count = 0;
+       vm_page_background_internal_count = 0;
+       vm_page_background_external_count = 0;
+       vm_page_background_promoted_count = 0;
+
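+       /* default the background target to 1/25th (4%) of physical pages, capped below */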
+       vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
+
+       if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX)
+               vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
+
+       vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
+       vm_page_background_exclude_external = 0;
+
+       PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
+       PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
+       PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
+
+       if (vm_page_background_mode > VM_PAGE_BG_LEVEL_1)
+               vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
+#endif
        vm_page_free_wanted = 0;
        vm_page_free_wanted_privileged = 0;
+#if CONFIG_SECLUDED_MEMORY
+       vm_page_free_wanted_secluded = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
        
+#if defined (__x86_64__)
+       /* this must be called before vm_page_set_colors() */
+       vm_page_setup_clump();
+#endif
+
        vm_page_set_colors();
 
+       bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
+       vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+       vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+       vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+
+       bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
+       vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+       vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+       vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+       vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
+       vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
+       vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
+#if CONFIG_SECLUDED_MEMORY
+       vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+       bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
+       vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+       vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+       vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+       vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
+       vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
+#if CONFIG_SECLUDED_MEMORY
+       vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
+       
+       bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
+       vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+       vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+       vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+       vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
+#if CONFIG_SECLUDED_MEMORY
+       vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+       for (i = 0; i < VM_KERN_MEMORY_FIRST_DYNAMIC; i++)
+       {
+               vm_allocation_sites_static[i].refcount = 2;
+               vm_allocation_sites_static[i].tag = i;
+               vm_allocation_sites[i] = &vm_allocation_sites_static[i];
+       }
+       vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
+       vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
+       vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
 
        /*
         *      Steal memory for the map and zone subsystems.
         */
-       zone_steal_memory();
+#if CONFIG_GZALLOC
+       gzalloc_configure();
+#endif
+       kernel_debug_string_early("vm_map_steal_memory");
        vm_map_steal_memory();
 
        /*
@@ -645,18 +919,44 @@ vm_page_bootstrap(
        if (vm_page_hash_mask & vm_page_bucket_count)
                printf("vm_page_bootstrap: WARNING -- strange page hash\n");
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+       /*
+        * Allocate a decoy set of page buckets, to detect
+        * any stomping there.
+        */
+       vm_page_fake_buckets = (vm_page_bucket_t *)
+               pmap_steal_memory(vm_page_bucket_count *
+                                 sizeof(vm_page_bucket_t));
+       vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+       vm_page_fake_buckets_end =
+               vm_map_round_page((vm_page_fake_buckets_start +
+                                  (vm_page_bucket_count *
+                                   sizeof (vm_page_bucket_t))),
+                                 PAGE_MASK);
+       char *cp;
+       for (cp = (char *)vm_page_fake_buckets_start;
+            cp < (char *)vm_page_fake_buckets_end;
+            cp++) {
+               *cp = 0x5a;
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+       kernel_debug_string_early("vm_page_buckets");
        vm_page_buckets = (vm_page_bucket_t *)
                pmap_steal_memory(vm_page_bucket_count *
                                  sizeof(vm_page_bucket_t));
 
+       kernel_debug_string_early("vm_page_bucket_locks");
        vm_page_bucket_locks = (lck_spin_t *)
                pmap_steal_memory(vm_page_bucket_lock_count *
                                  sizeof(lck_spin_t));
 
        for (i = 0; i < vm_page_bucket_count; i++) {
-               register vm_page_bucket_t *bucket = &vm_page_buckets[i];
+               vm_page_bucket_t *bucket = &vm_page_buckets[i];
 
-               bucket->pages = VM_PAGE_NULL;
+               bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 #if     MACH_PAGE_HASH_STATS
                bucket->cur_count = 0;
                bucket->hi_count = 0;
@@ -666,6 +966,14 @@ vm_page_bootstrap(
        for (i = 0; i < vm_page_bucket_lock_count; i++)
                lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
 
+       lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+       lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+       vm_tag_init();
+
+#if VM_PAGE_BUCKETS_CHECK
+       vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
        /*
         *      Machine-dependent code allocates the resident page table.
         *      It uses vm_page_init to initialize the page frames.
@@ -674,6 +982,7 @@ vm_page_bootstrap(
         *      to get the alignment right.
         */
 
+       kernel_debug_string_early("pmap_startup");
        pmap_startup(&virtual_space_start, &virtual_space_end);
        virtual_space_start = round_page(virtual_space_start);
        virtual_space_end = trunc_page(virtual_space_end);
@@ -690,12 +999,16 @@ vm_page_bootstrap(
         */
        assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
        vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count;     /* initial value */
+#if CONFIG_SECLUDED_MEMORY
+       vm_page_wire_count -= vm_page_secluded_count;
+#endif
        vm_page_wire_count_initial = vm_page_wire_count;
-       vm_page_free_count_minimum = vm_page_free_count;
+       vm_page_pages_initial = vm_page_pages;
 
        printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
               vm_page_free_count, vm_page_wire_count);
 
+       kernel_debug_string_early("vm_page_bootstrap complete");
        simple_lock_init(&vm_paging_lock, 0);
 }
 
@@ -709,8 +1022,9 @@ void *
 pmap_steal_memory(
        vm_size_t size)
 {
+       kern_return_t kr;
        vm_offset_t addr, vaddr;
-       ppnum_t phys_page;
+       ppnum_t phys_page;
 
        /*
         *      We round the size to a round multiple.
@@ -753,29 +1067,61 @@ pmap_steal_memory(
             vaddr += PAGE_SIZE) {
 
                if (!pmap_next_page_hi(&phys_page))
-                       panic("pmap_steal_memory");
+                       panic("pmap_steal_memory() size: 0x%llx\n", (uint64_t)size);
 
                /*
                 *      XXX Logically, these mappings should be wired,
                 *      but some pmap modules barf if they are.
                 */
 #if defined(__LP64__)
+#ifdef __arm64__
+               /* ARM64_TODO: verify that we really don't need this */
+#else
                pmap_pre_expand(kernel_pmap, vaddr);
+#endif
 #endif
 
-               pmap_enter(kernel_pmap, vaddr, phys_page,
-                          VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE,
-                               VM_WIMG_USE_DEFAULT, FALSE);
+               kr = pmap_enter(kernel_pmap, vaddr, phys_page,
+                               VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE,
+                               VM_WIMG_USE_DEFAULT, FALSE);
+
+               if (kr != KERN_SUCCESS) {
+                       panic("pmap_steal_memory() pmap_enter failed, vaddr=%#lx, phys_page=%u",
+                             (unsigned long)vaddr, phys_page);
+               }
+
                /*
                 * Account for newly stolen memory
                 */
                vm_page_wire_count++;
-
+               vm_page_stolen_count++;
        }
 
+#if KASAN
+       kasan_notify_address(round_page(addr), size);
+#endif
        return (void *) addr;
 }
 
+#if CONFIG_SECLUDED_MEMORY
+/* boot-args to control secluded memory */
+unsigned int secluded_mem_mb = 0;      /* # of MBs of RAM to seclude */
+int secluded_for_iokit = 1;            /* IOKit can use secluded memory */
+int secluded_for_apps = 1;             /* apps can use secluded memory */
+int secluded_for_filecache = 2;                /* filecache can use secluded memory */
+#if 11
+int secluded_for_fbdp = 0;
+#endif
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+
+#if defined(__arm__) || defined(__arm64__)
+extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
+unsigned int vm_first_phys_ppnum = 0;
+#endif
+
+
+void vm_page_release_startup(vm_page_t mem);
 void
 pmap_startup(
        vm_offset_t *startp,
@@ -785,6 +1131,16 @@ pmap_startup(
        ppnum_t         phys_page;
        addr64_t        tmpaddr;
 
+#if    defined(__LP64__)
+       /*
+        * make sure we are aligned on a 64 byte boundary
+        * for VM_PAGE_PACK_PTR (it clips off the low-order
+        * 6 bits of the pointer)
+        */
+       if (virtual_space_start != virtual_space_end)
+               virtual_space_start = round_page(virtual_space_start);
+#endif
+
        /*
         *      We calculate how many page frames we will have
         *      and then allocate the page structures in one chunk.
@@ -799,9 +1155,21 @@ pmap_startup(
        /*
         *      Initialize the page frames.
         */
+       kernel_debug_string_early("Initialize the page frames");
+
+       vm_page_array_beginning_addr = &vm_pages[0];
+       vm_page_array_ending_addr = &vm_pages[npages];
+
        for (i = 0, pages_initialized = 0; i < npages; i++) {
                if (!pmap_next_page(&phys_page))
                        break;
+#if defined(__arm__) || defined(__arm64__)
+               if (pages_initialized == 0) {
+                       vm_first_phys_ppnum = phys_page;
+                       patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr, (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
+               }
+               assert((i + vm_first_phys_ppnum) == phys_page);
+#endif
                if (pages_initialized == 0 || phys_page < vm_page_lowest)
                        vm_page_lowest = phys_page;
 
@@ -810,7 +1178,17 @@ pmap_startup(
                pages_initialized++;
        }
        vm_pages_count = pages_initialized;
+       vm_page_array_boundary = &vm_pages[pages_initialized];
 
+#if    defined(__LP64__)
+
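+       /* verify that packed page pointers round-trip across the whole vm_pages array */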
+       if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0])
+               panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+
+       if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1]))) != &vm_pages[vm_pages_count-1])
+               panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
+       kernel_debug_string_early("page fill/release");
        /*
         * Check if we want to initialize pages to a known value
         */
@@ -828,12 +1206,46 @@ pmap_startup(
 #endif
        if (fill)
                kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
+
+#if CONFIG_SECLUDED_MEMORY
+       /* default: no secluded mem */
+       secluded_mem_mb = 0;
+       if (max_mem > 1*1024*1024*1024) {
+               /* default to 90MB for devices with > 1GB of RAM */
+               secluded_mem_mb = 90;
+       }
+       /* override with value from device tree, if provided */
+       PE_get_default("kern.secluded_mem_mb",
+                      &secluded_mem_mb, sizeof(secluded_mem_mb));
+       /* override with value from boot-args, if provided */
+       PE_parse_boot_argn("secluded_mem_mb",
+                          &secluded_mem_mb,
+                          sizeof (secluded_mem_mb));
+
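+       /* convert the MB setting into a page-count target */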
+       vm_page_secluded_target = (unsigned int)
+               ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE);
+       PE_parse_boot_argn("secluded_for_iokit",
+                          &secluded_for_iokit,
+                          sizeof (secluded_for_iokit));
+       PE_parse_boot_argn("secluded_for_apps",
+                          &secluded_for_apps,
+                          sizeof (secluded_for_apps));
+       PE_parse_boot_argn("secluded_for_filecache",
+                          &secluded_for_filecache,
+                          sizeof (secluded_for_filecache));
+#if 11
+       PE_parse_boot_argn("secluded_for_fbdp",
+                          &secluded_for_fbdp,
+                          sizeof (secluded_for_fbdp));
+#endif
+#endif /* CONFIG_SECLUDED_MEMORY */
+
        // -debug code remove
        if (2 == vm_himemory_mode) {
                // free low -> high so high is preferred
                for (i = 1; i <= pages_initialized; i++) {
-                       if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);          /* Fill the page with a know value if requested at boot */                      
-                       vm_page_release(&vm_pages[i - 1]);
+                       if(fill) fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i - 1]), fillval);            /* Fill the page with a known value if requested at boot */
+                       vm_page_release_startup(&vm_pages[i - 1]);
                }
        }
        else
@@ -846,10 +1258,12 @@ pmap_startup(
         * they require several consecutive pages.
         */
        for (i = pages_initialized; i > 0; i--) {
-               if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);          /* Fill the page with a know value if requested at boot */                      
-               vm_page_release(&vm_pages[i - 1]);
+               if(fill) fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i - 1]), fillval);            /* Fill the page with a known value if requested at boot */
+               vm_page_release_startup(&vm_pages[i - 1]);
        }
 
+       VM_CHECK_MEMORYSTATUS;
+       
 #if 0
        {
                vm_page_t xx, xxo, xxl;
@@ -859,7 +1273,7 @@ pmap_startup(
                xxl = 0;
                
                for( i = 0; i < vm_colors; i++ ) {
-                       queue_iterate(&vm_page_queue_free[i],
+                       queue_iterate(&vm_page_queue_free[i].qhead,
                                      xx,
                                      vm_page_t,
                                      pageq) {  /* BRINGUP */
@@ -873,7 +1287,7 @@ pmap_startup(
                                
                                if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
 
-                               for(xxo = xx->pageq.next; xxo != &vm_page_queue_free[i]; xxo = xxo->pageq.next) {       /* (BRINGUP) */
+                               for(xxo = xx->pageq.next; xxo != &vm_page_queue_free[i].qhead; xxo = xxo->pageq.next) { /* (BRINGUP) */
                                        k++;
                                        if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
                                        if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {     /* (BRINGUP) */
@@ -913,25 +1327,41 @@ pmap_startup(
 void
 vm_page_module_init(void)
 {
-       vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
-                            0, PAGE_SIZE, "vm pages");
+       uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
+       vm_size_t       vm_page_with_ppnum_size;
 
-#if    ZONE_DEBUG
-       zone_debug_disable(vm_page_zone);
-#endif /* ZONE_DEBUG */
+       vm_page_array_zone = zinit((vm_size_t) sizeof(struct vm_page),
+                            0, PAGE_SIZE, "vm pages array");
+
+       zone_change(vm_page_array_zone, Z_CALLERACCT, FALSE);
+       zone_change(vm_page_array_zone, Z_EXPAND, FALSE);
+       zone_change(vm_page_array_zone, Z_EXHAUST, TRUE);
+       zone_change(vm_page_array_zone, Z_FOREIGN, TRUE);
+       zone_change(vm_page_array_zone, Z_GZALLOC_EXEMPT, TRUE);
+       /*
+        * Adjust zone statistics to account for the real pages allocated
+        * in vm_page_create(). [Q: is this really what we want?]
+        */
+       vm_page_array_zone->count += vm_page_pages;
+       vm_page_array_zone->sum_count += vm_page_pages;
+       vm_page_array_zone_data_size = vm_page_pages * vm_page_array_zone->elem_size;
+       vm_page_array_zone->cur_size += vm_page_array_zone_data_size;
+       vm_page_zone_pages = ((round_page(vm_page_array_zone_data_size)) / PAGE_SIZE);
+       OSAddAtomic64(vm_page_zone_pages, &(vm_page_array_zone->page_count));
+       /* since zone accounts for these, take them out of stolen */
+       VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
+
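+       /* round the element size up so zone elements preserve VM_PACKED_POINTER_ALIGNMENT */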
+       vm_page_with_ppnum_size = (sizeof(struct vm_page_with_ppnum) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);
+
+       vm_page_zone = zinit(vm_page_with_ppnum_size,
+                            0, PAGE_SIZE, "vm pages");
 
        zone_change(vm_page_zone, Z_CALLERACCT, FALSE);
        zone_change(vm_page_zone, Z_EXPAND, FALSE);
        zone_change(vm_page_zone, Z_EXHAUST, TRUE);
        zone_change(vm_page_zone, Z_FOREIGN, TRUE);
        zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
-        /*
-         * Adjust zone statistics to account for the real pages allocated
-         * in vm_page_create(). [Q: is this really what we want?]
-         */
-        vm_page_zone->count += vm_page_pages;
-        vm_page_zone->sum_count += vm_page_pages;
-        vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
+       zone_change(vm_page_zone, Z_ALIGNMENT_REQUIRED, TRUE);
 }
 
 /*
@@ -962,7 +1392,7 @@ vm_page_create(
                pmap_clear_noencrypt(phys_page);
 
                vm_page_pages++;
-               vm_page_release(m);
+               vm_page_release(m, FALSE);
        }
 }
 
@@ -992,7 +1422,17 @@ vm_page_insert(
        vm_object_t             object,
        vm_object_offset_t      offset)
 {
-       vm_page_insert_internal(mem, object, offset, FALSE, TRUE, FALSE);
+       vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
+}
+
+void
+vm_page_insert_wired(
+       vm_page_t               mem,
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_tag_t                tag)
+{
+       vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
 }
 
 void
@@ -1000,13 +1440,17 @@ vm_page_insert_internal(
        vm_page_t               mem,
        vm_object_t             object,
        vm_object_offset_t      offset,
+       vm_tag_t                tag,
        boolean_t               queues_lock_held,
        boolean_t               insert_in_hash,
-       boolean_t               batch_pmap_op)
+       boolean_t               batch_pmap_op,
+        boolean_t               batch_accounting,
+       uint64_t                *delayed_ledger_update)
 {
-       vm_page_bucket_t *bucket;
-       lck_spin_t      *bucket_lock;
-       int     hash_id;
+       vm_page_bucket_t        *bucket;
+       lck_spin_t              *bucket_lock;
+       int                     hash_id;
+       task_t                  owner;
 
         XPR(XPR_VM_PAGE,
                 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
@@ -1019,30 +1463,32 @@ vm_page_insert_internal(
        VM_PAGE_CHECK(mem);
 #endif
 
-       if (object == vm_submap_object) {
-               /* the vm_submap_object is only a placeholder for submaps */
-               panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset);
-       }
+       assert(page_aligned(offset));
+
+       assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE));
+
+       /* the vm_submap_object is only a placeholder for submaps */
+       assert(object != vm_submap_object);
 
        vm_object_lock_assert_exclusive(object);
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock,
+       LCK_MTX_ASSERT(&vm_page_queue_lock,
                       queues_lock_held ? LCK_MTX_ASSERT_OWNED
                                        : LCK_MTX_ASSERT_NOTOWNED);
-#endif /* DEBUG */
-       
+
+       if (queues_lock_held == FALSE)
+               assert(!VM_PAGE_PAGEABLE(mem));
+
        if (insert_in_hash == TRUE) {
-#if DEBUG
-               if (mem->tabled || mem->object != VM_OBJECT_NULL)
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
+               if (mem->tabled || mem->vm_page_object)
                        panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
                              "already in (obj=%p,off=0x%llx)",
-                             mem, object, offset, mem->object, mem->offset);
+                             mem, object, offset, VM_PAGE_OBJECT(mem), mem->offset);
 #endif
-               assert(!object->internal || offset < object->vo_size);
-
-               /* only insert "pageout" pages into "pageout" objects,
-                * and normal pages into normal objects */
-               assert(object->pageout == mem->pageout);
+               if (object->internal && (offset >= object->vo_size)) {
+                       panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
+                             mem, object, offset, object->vo_size);
+               }
 
                assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
                
@@ -1050,9 +1496,15 @@ vm_page_insert_internal(
                 *      Record the object/offset pair in this page
                 */
 
-               mem->object = object;
+               mem->vm_page_object = VM_PAGE_PACK_OBJECT(object);
                mem->offset = offset;
 
+#if CONFIG_SECLUDED_MEMORY
+               if (object->eligible_for_secluded) {
+                       vm_page_secluded.eligible_for_secluded++;
+               }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
                /*
                 *      Insert it into the object_object/offset hash table
                 */
@@ -1062,13 +1514,15 @@ vm_page_insert_internal(
        
                lck_spin_lock(bucket_lock);
 
-               mem->next = bucket->pages;
-               bucket->pages = mem;
+               mem->next_m = bucket->page_list;
+               bucket->page_list = VM_PAGE_PACK_PTR(mem);
+               assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
+
 #if     MACH_PAGE_HASH_STATS
                if (++bucket->cur_count > bucket->hi_count)
                        bucket->hi_count = bucket->cur_count;
 #endif /* MACH_PAGE_HASH_STATS */
-
+               mem->hashed = TRUE;
                lck_spin_unlock(bucket_lock);
        }
 
@@ -1084,8 +1538,8 @@ vm_page_insert_internal(
        /*
         *      Now link into the object's list of backed pages.
         */
-
-       VM_PAGE_INSERT(mem, object);
+       vm_page_queue_enter(&object->memq, mem, vm_page_t, listq);
+       object->memq_hint = mem;
        mem->tabled = TRUE;
 
        /*
@@ -1094,20 +1548,78 @@ vm_page_insert_internal(
 
        object->resident_page_count++;
        if (VM_PAGE_WIRED(mem)) {
-               object->wired_page_count++;
+           assert(mem->wire_count > 0);
+           VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
+           VM_OBJECT_WIRED_PAGE_ADD(object, mem);
+           VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
        }
        assert(object->resident_page_count >= object->wired_page_count);
 
+        if (batch_accounting == FALSE) {
+               if (object->internal) {
+                       OSAddAtomic(1, &vm_page_internal_count);
+               } else {
+                       OSAddAtomic(1, &vm_page_external_count);
+               }
+       }
+
+       /*
+        * It wouldn't make sense to insert a "reusable" page in
+        * an object (the page would have been marked "reusable" only
+        * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
+        * in the object at that time).
+        * But a page could be inserted in a "all_reusable" object, if
+        * something faults it in (a vm_read() from another task or a
+        * "use-after-free" issue in user space, for example).  It can
+        * also happen if we're relocating a page from that object to
+        * a different physical page during a physically-contiguous
+        * allocation.
+        */
        assert(!mem->reusable);
+       if (object->all_reusable) {
+               OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
+       }
+
+       if (object->purgable == VM_PURGABLE_DENY) {
+               owner = TASK_NULL;
+       } else {
+               owner = object->vo_purgeable_owner;
+       }
+       if (owner &&
+           (object->purgable == VM_PURGABLE_NONVOLATILE ||
+            VM_PAGE_WIRED(mem))) {
+
+               if (delayed_ledger_update)
+                       *delayed_ledger_update += PAGE_SIZE;
+               else {
+                       /* more non-volatile bytes */
+                       ledger_credit(owner->ledger,
+                                     task_ledgers.purgeable_nonvolatile,
+                                     PAGE_SIZE);
+                       /* more footprint */
+                       ledger_credit(owner->ledger,
+                                     task_ledgers.phys_footprint,
+                                     PAGE_SIZE);
+               }
+
+       } else if (owner &&
+                  (object->purgable == VM_PURGABLE_VOLATILE ||
+                   object->purgable == VM_PURGABLE_EMPTY)) {
+               assert(! VM_PAGE_WIRED(mem));
+               /* more volatile bytes */
+               ledger_credit(owner->ledger,
+                             task_ledgers.purgeable_volatile,
+                             PAGE_SIZE);
+       }
 
        if (object->purgable == VM_PURGABLE_VOLATILE) {
                if (VM_PAGE_WIRED(mem)) {
-                       OSAddAtomic(1, &vm_page_purgeable_wired_count);
+                       OSAddAtomic(+1, &vm_page_purgeable_wired_count);
                } else {
-                       OSAddAtomic(1, &vm_page_purgeable_count);
+                       OSAddAtomic(+1, &vm_page_purgeable_count);
                }
        } else if (object->purgable == VM_PURGABLE_EMPTY &&
-                  mem->throttled) {
+                  mem->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) {
                /*
                 * This page belongs to a purged VM object but hasn't
                 * been purged (because it was "busy").
@@ -1122,6 +1634,25 @@ vm_page_insert_internal(
                if (queues_lock_held == FALSE)
                        vm_page_unlock_queues();
        }
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+       if (vm_object_tracking_inited &&
+           object->internal &&
+           object->resident_page_count == 0 &&
+           object->pager == NULL &&
+           object->shadow != NULL &&
+           object->shadow->copy == object) {
+               void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+               int numsaved = 0;
+
+               numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+               btlog_add_entry(vm_object_tracking_btlog,
+                               object,
+                               VM_OBJECT_TRACKING_OP_MODIFIED,
+                               bt,
+                               numsaved);
+       }
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
 }
 
 /*
@@ -1134,9 +1665,9 @@ vm_page_insert_internal(
  */
 void
 vm_page_replace(
-       register vm_page_t              mem,
-       register vm_object_t            object,
-       register vm_object_offset_t     offset)
+       vm_page_t               mem,
+       vm_object_t             object,
+       vm_object_offset_t      offset)
 {
        vm_page_bucket_t *bucket;
        vm_page_t        found_m = VM_PAGE_NULL;
@@ -1151,18 +1682,20 @@ vm_page_replace(
        VM_PAGE_CHECK(mem);
 #endif
        vm_object_lock_assert_exclusive(object);
-#if DEBUG
-       if (mem->tabled || mem->object != VM_OBJECT_NULL)
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
+       if (mem->tabled || mem->vm_page_object)
                panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
                      "already in (obj=%p,off=0x%llx)",
-                     mem, object, offset, mem->object, mem->offset);
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+                     mem, object, offset, VM_PAGE_OBJECT(mem), mem->offset);
 #endif
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+
+       assert(!VM_PAGE_PAGEABLE(mem));
+
        /*
         *      Record the object/offset pair in this page
         */
-
-       mem->object = object;
+       mem->vm_page_object = VM_PAGE_PACK_OBJECT(object);
        mem->offset = offset;
 
        /*
@@ -1176,31 +1709,37 @@ vm_page_replace(
 
        lck_spin_lock(bucket_lock);
 
-       if (bucket->pages) {
-               vm_page_t *mp = &bucket->pages;
-               vm_page_t m = *mp;
+       if (bucket->page_list) {
+               vm_page_packed_t *mp = &bucket->page_list;
+               vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
 
                do {
-                       if (m->object == object && m->offset == offset) {
+                       /*
+                        * compare packed object pointers
+                        */
+                       if (m->vm_page_object == mem->vm_page_object && m->offset == offset) {
                                /*
                                 * Remove old page from hash list
                                 */
-                               *mp = m->next;
+                               *mp = m->next_m;
+                               m->hashed = FALSE;
+                               m->next_m = VM_PAGE_PACK_PTR(NULL);
 
                                found_m = m;
                                break;
                        }
-                       mp = &m->next;
-               } while ((m = *mp));
+                       mp = &m->next_m;
+               } while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
 
-               mem->next = bucket->pages;
+               mem->next_m = bucket->page_list;
        } else {
-               mem->next = VM_PAGE_NULL;
+               mem->next_m = VM_PAGE_PACK_PTR(NULL);
        }
        /*
         * insert new page at head of hash list
         */
-       bucket->pages = mem;
+       bucket->page_list = VM_PAGE_PACK_PTR(mem);
+       mem->hashed = TRUE;
 
        lck_spin_unlock(bucket_lock);
 
@@ -1212,7 +1751,7 @@ vm_page_replace(
                 */
                vm_page_free_unlocked(found_m, FALSE);
        }
-       vm_page_insert_internal(mem, object, offset, FALSE, FALSE, FALSE);
+       vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
 }
 
 /*
@@ -1233,16 +1772,24 @@ vm_page_remove(
        vm_page_t       this;
        lck_spin_t      *bucket_lock;
        int             hash_id;
+       task_t          owner;
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
 
         XPR(XPR_VM_PAGE,
                 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
-                mem->object, mem->offset, 
+                m_object, mem->offset, 
                mem, 0,0);
 
-       vm_object_lock_assert_exclusive(mem->object);
+       vm_object_lock_assert_exclusive(m_object);
        assert(mem->tabled);
        assert(!mem->cleaning);
        assert(!mem->laundry);
+
+       if (VM_PAGE_PAGEABLE(mem)) {
+               LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       }
 #if 0
        /*
         * we don't hold the page queue lock
@@ -1254,70 +1801,113 @@ vm_page_remove(
                /*
                 *      Remove from the object_object/offset hash table
                 */
-               hash_id = vm_page_hash(mem->object, mem->offset);
+               hash_id = vm_page_hash(m_object, mem->offset);
                bucket = &vm_page_buckets[hash_id];
                bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 
                lck_spin_lock(bucket_lock);
 
-               if ((this = bucket->pages) == mem) {
+               if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
                        /* optimize for common case */
 
-                       bucket->pages = mem->next;
+                       bucket->page_list = mem->next_m;
                } else {
-                       vm_page_t       *prev;
+                       vm_page_packed_t        *prev;
 
-                       for (prev = &this->next;
-                            (this = *prev) != mem;
-                            prev = &this->next)
+                       for (prev = &this->next_m;
+                            (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
+                            prev = &this->next_m)
                                continue;
-                       *prev = this->next;
+                       *prev = this->next_m;
                }
 #if     MACH_PAGE_HASH_STATS
                bucket->cur_count--;
 #endif /* MACH_PAGE_HASH_STATS */
-
+               mem->hashed = FALSE;
+               this->next_m = VM_PAGE_PACK_PTR(NULL);
                lck_spin_unlock(bucket_lock);
        }
        /*
         *      Now remove from the object's list of backed pages.
         */
 
-       VM_PAGE_REMOVE(mem);
+       vm_page_remove_internal(mem);
 
        /*
         *      And show that the object has one fewer resident
         *      page.
         */
 
-       assert(mem->object->resident_page_count > 0);
-       mem->object->resident_page_count--;
+       assert(m_object->resident_page_count > 0);
+       m_object->resident_page_count--;
+
+       if (m_object->internal) {
+#if DEBUG
+               assert(vm_page_internal_count);
+#endif /* DEBUG */
+
+               OSAddAtomic(-1, &vm_page_internal_count);
+       } else {
+               assert(vm_page_external_count);
+               OSAddAtomic(-1, &vm_page_external_count);
 
-       if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) {
-               if (mem->object->resident_page_count == 0)
-                       vm_object_cache_remove(mem->object);
+               if (mem->xpmapped) {
+                       assert(vm_page_xpmapped_external_count);
+                       OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+               }
+       }
+       if (!m_object->internal && (m_object->objq.next || m_object->objq.prev)) {
+               if (m_object->resident_page_count == 0)
+                       vm_object_cache_remove(m_object);
        }
 
        if (VM_PAGE_WIRED(mem)) {
-               assert(mem->object->wired_page_count > 0);
-               mem->object->wired_page_count--;
+               assert(mem->wire_count > 0);
+               VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+               VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
+               VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
        }
-       assert(mem->object->resident_page_count >=
-              mem->object->wired_page_count);
+       assert(m_object->resident_page_count >=
+              m_object->wired_page_count);
        if (mem->reusable) {
-               assert(mem->object->reusable_page_count > 0);
-               mem->object->reusable_page_count--;
-               assert(mem->object->reusable_page_count <=
-                      mem->object->resident_page_count);
+               assert(m_object->reusable_page_count > 0);
+               m_object->reusable_page_count--;
+               assert(m_object->reusable_page_count <=
+                      m_object->resident_page_count);
                mem->reusable = FALSE;
                OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
                vm_page_stats_reusable.reused_remove++;
-       } else if (mem->object->all_reusable) {
+       } else if (m_object->all_reusable) {
                OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
                vm_page_stats_reusable.reused_remove++;
        }
 
-       if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+       if (m_object->purgable == VM_PURGABLE_DENY) {
+               owner = TASK_NULL;
+       } else {
+               owner = m_object->vo_purgeable_owner;
+       }
+       if (owner &&
+           (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
+            VM_PAGE_WIRED(mem))) {
+               /* less non-volatile bytes */
+               ledger_debit(owner->ledger,
+                            task_ledgers.purgeable_nonvolatile,
+                            PAGE_SIZE);
+               /* less footprint */
+               ledger_debit(owner->ledger,
+                            task_ledgers.phys_footprint,
+                            PAGE_SIZE);
+       } else if (owner &&
+                  (m_object->purgable == VM_PURGABLE_VOLATILE ||
+                   m_object->purgable == VM_PURGABLE_EMPTY)) {
+               assert(! VM_PAGE_WIRED(mem));
+               /* less volatile bytes */
+               ledger_debit(owner->ledger,
+                            task_ledgers.purgeable_volatile,
+                            PAGE_SIZE);
+       }
+       if (m_object->purgable == VM_PURGABLE_VOLATILE) {
                if (VM_PAGE_WIRED(mem)) {
                        assert(vm_page_purgeable_wired_count > 0);
                        OSAddAtomic(-1, &vm_page_purgeable_wired_count);
@@ -1326,11 +1916,12 @@ vm_page_remove(
                        OSAddAtomic(-1, &vm_page_purgeable_count);
                }
        }
-       if (mem->object->set_cache_attr == TRUE)
-               pmap_set_cache_attributes(mem->phys_page, 0);
+
+       if (m_object->set_cache_attr == TRUE)
+               pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
 
        mem->tabled = FALSE;
-       mem->object = VM_OBJECT_NULL;
+       mem->vm_page_object = 0;
        mem->offset = (vm_object_offset_t) -1;
 }
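
The purgeable ledger moves in vm_page_remove() above reduce to one rule: a page leaving a purgeable object is billed against the owner's "non-volatile" ledger (and phys_footprint) while the object is non-volatile or the page is wired, and against the "volatile" ledger otherwise. A minimal user-space sketch of that decision; purgable_t, ledger_delta and page_remove_delta are illustrative stand-ins for the kernel's ledger_debit() calls, not real XNU interfaces:

#include <stdbool.h>

typedef enum { PURGE_DENY, PURGE_NONVOLATILE, PURGE_VOLATILE, PURGE_EMPTY } purgable_t;

struct ledger_delta {
        long    nonvolatile;    /* bytes debited from purgeable_nonvolatile */
        long    volatile_bytes; /* bytes debited from purgeable_volatile */
        long    footprint;      /* bytes debited from phys_footprint */
};

/* mirrors the owner/purgable branches at the end of vm_page_remove() */
static struct ledger_delta
page_remove_delta(purgable_t state, bool wired, long page_size)
{
        struct ledger_delta d = { 0, 0, 0 };

        if (state == PURGE_DENY)
                return d;                       /* no purgeable owner to bill */

        if (state == PURGE_NONVOLATILE || wired) {
                d.nonvolatile = page_size;      /* less non-volatile bytes... */
                d.footprint = page_size;        /* ...and less footprint */
        } else if (state == PURGE_VOLATILE || state == PURGE_EMPTY) {
                d.volatile_bytes = page_size;   /* less volatile bytes */
        }
        return d;
}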
 
@@ -1344,13 +1935,55 @@ vm_page_remove(
  *     The object must be locked.  No side effects.
  */
 
-unsigned long vm_page_lookup_hint = 0;
-unsigned long vm_page_lookup_hint_next = 0;
-unsigned long vm_page_lookup_hint_prev = 0;
-unsigned long vm_page_lookup_hint_miss = 0;
-unsigned long vm_page_lookup_bucket_NULL = 0;
-unsigned long vm_page_lookup_miss = 0;
+#define        VM_PAGE_HASH_LOOKUP_THRESHOLD   10
+
+#if DEBUG_VM_PAGE_LOOKUP
+
+struct {
+       uint64_t        vpl_total;
+       uint64_t        vpl_empty_obj;
+       uint64_t        vpl_bucket_NULL;
+       uint64_t        vpl_hit_hint;
+       uint64_t        vpl_hit_hint_next;
+       uint64_t        vpl_hit_hint_prev;
+       uint64_t        vpl_fast;
+       uint64_t        vpl_slow;
+       uint64_t        vpl_hit;
+       uint64_t        vpl_miss;
+
+       uint64_t        vpl_fast_elapsed;
+       uint64_t        vpl_slow_elapsed;
+} vm_page_lookup_stats __attribute__((aligned(8)));
+
+#endif
+
+#define        KDP_VM_PAGE_WALK_MAX    1000
+
+vm_page_t
+kdp_vm_page_lookup(
+       vm_object_t             object,
+       vm_object_offset_t      offset)
+{
+       vm_page_t cur_page;
+       int num_traversed = 0;
+
+       if (not_in_kdp) {
+               panic("kdp_vm_page_lookup done outside of kernel debugger");
+       }
+
+       vm_page_queue_iterate(&object->memq, cur_page, vm_page_t, listq) {
+               if (cur_page->offset == offset) {
+                       return cur_page;
+               }
+               num_traversed++;
+
+               if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
+                       return VM_PAGE_NULL;
+               }
+       }
 
+       return VM_PAGE_NULL;
+}
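
Both the hash-chain unlinking in vm_page_remove() above and the bucket walk in vm_page_lookup() below follow next_m links stored as vm_page_packed_t: a 64-bit vm_page pointer squeezed into 32 bits by subtracting a base and shifting out alignment bits. A rough user-space model of the round trip, with assumed constants (the real scheme is VM_PAGE_PACK_PTR/VM_PAGE_UNPACK_PTR built on VM_PACKED_POINTER_SHIFT and vm_min_kernel_and_kext_address, plus a separate encoding for entries of the vm_pages array):

#include <assert.h>
#include <stdint.h>

#define PACK_BASE       0xffffff8000000000ULL   /* assumed packing base address */
#define PACK_SHIFT      6                       /* assumed 64-byte struct alignment */

typedef uint32_t page_packed_t;

static page_packed_t
pack_ptr(uintptr_t p)
{
        if (p == 0)
                return 0;                       /* NULL packs to 0 */
        return (page_packed_t)((p - PACK_BASE) >> PACK_SHIFT);
}

static uintptr_t
unpack_ptr(page_packed_t packed)
{
        if (packed == 0)
                return 0;
        return ((uintptr_t)packed << PACK_SHIFT) + PACK_BASE;
}

int
main(void)
{
        uintptr_t p = PACK_BASE + (123456ULL << PACK_SHIFT);

        assert(unpack_ptr(pack_ptr(p)) == p);   /* lossless round trip */
        return 0;
}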
 
 vm_page_t
 vm_page_lookup(
@@ -1359,46 +1992,64 @@ vm_page_lookup(
 {
        vm_page_t       mem;
        vm_page_bucket_t *bucket;
-       queue_entry_t   qe;
-       lck_spin_t      *bucket_lock;
+       vm_page_queue_entry_t   qe;
+       lck_spin_t      *bucket_lock = NULL;
        int             hash_id;
+#if DEBUG_VM_PAGE_LOOKUP
+       uint64_t        start, elapsed;
 
+       OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
+#endif
        vm_object_lock_assert_held(object);
+
+       if (object->resident_page_count == 0) {
+#if DEBUG_VM_PAGE_LOOKUP
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
+#endif
+               return (VM_PAGE_NULL);
+       }
+
        mem = object->memq_hint;
 
        if (mem != VM_PAGE_NULL) {
-               assert(mem->object == object);
+               assert(VM_PAGE_OBJECT(mem) == object);
 
                if (mem->offset == offset) {
-                       vm_page_lookup_hint++;
-                       return mem;
+#if DEBUG_VM_PAGE_LOOKUP
+                       OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
+#endif
+                       return (mem);
                }
-               qe = queue_next(&mem->listq);
+               qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->listq);
 
-               if (! queue_end(&object->memq, qe)) {
+               if (! vm_page_queue_end(&object->memq, qe)) {
                        vm_page_t       next_page;
 
-                       next_page = (vm_page_t) qe;
-                       assert(next_page->object == object);
+                       next_page = (vm_page_t)((uintptr_t)qe);
+                       assert(VM_PAGE_OBJECT(next_page) == object);
 
                        if (next_page->offset == offset) {
-                               vm_page_lookup_hint_next++;
                                object->memq_hint = next_page; /* new hint */
-                               return next_page;
+#if DEBUG_VM_PAGE_LOOKUP
+                               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
+#endif
+                               return (next_page);
                        }
                }
-               qe = queue_prev(&mem->listq);
+               qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->listq);
 
-               if (! queue_end(&object->memq, qe)) {
+               if (! vm_page_queue_end(&object->memq, qe)) {
                        vm_page_t prev_page;
 
-                       prev_page = (vm_page_t) qe;
-                       assert(prev_page->object == object);
+                       prev_page = (vm_page_t)((uintptr_t)qe);
+                       assert(VM_PAGE_OBJECT(prev_page) == object);
 
                        if (prev_page->offset == offset) {
-                               vm_page_lookup_hint_prev++;
                                object->memq_hint = prev_page; /* new hint */
-                               return prev_page;
+#if DEBUG_VM_PAGE_LOOKUP
+                               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
+#endif
+                               return (prev_page);
                        }
                }
        }
@@ -1416,74 +2067,103 @@ vm_page_lookup(
         * at outside the scope of the hash bucket lock... this is a
         * really cheap optimization to avoid taking the lock
         */
-       if (bucket->pages == VM_PAGE_NULL) {
-               vm_page_lookup_bucket_NULL++;
-
+       if (!bucket->page_list) {
+#if DEBUG_VM_PAGE_LOOKUP
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
+#endif
                return (VM_PAGE_NULL);
        }
-       bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 
-       lck_spin_lock(bucket_lock);
-
-       for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
-#if 0
+#if DEBUG_VM_PAGE_LOOKUP
+       start = mach_absolute_time();
+#endif
+       if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
                /*
-                * we don't hold the page queue lock
-                * so this check isn't safe to make
+                * on average, it's roughly 3 times faster to walk a short memq list
+                * than to take the spin lock and go through the hash list
                 */
-               VM_PAGE_CHECK(mem);
-#endif
-               if ((mem->object == object) && (mem->offset == offset))
-                       break;
-       }
-       lck_spin_unlock(bucket_lock);
+               mem = (vm_page_t)vm_page_queue_first(&object->memq);
 
-       if (mem != VM_PAGE_NULL) {
-               if (object->memq_hint != VM_PAGE_NULL) {
-                       vm_page_lookup_hint_miss++;
+               while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
+
+                       if (mem->offset == offset)
+                               break;
+
+                       mem = (vm_page_t)vm_page_queue_next(&mem->listq);
                }
-               assert(mem->object == object);
-               object->memq_hint = mem;
-       } else
-               vm_page_lookup_miss++;
+               if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem))
+                       mem = NULL;
+       } else {
+               vm_page_object_t        packed_object;
 
-       return(mem);
-}
+               packed_object = VM_PAGE_PACK_OBJECT(object);
 
+               bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 
-/*
- *     vm_page_rename:
- *
- *     Move the given memory entry from its
- *     current object to the specified target object/offset.
- *
- *     The object must be locked.
- */
-void
-vm_page_rename(
-       register vm_page_t              mem,
-       register vm_object_t            new_object,
-       vm_object_offset_t              new_offset,
-       boolean_t                       encrypted_ok)
-{
-       assert(mem->object != new_object);
+               lck_spin_lock(bucket_lock);
 
-       /*
-        * ENCRYPTED SWAP:
-        * The encryption key is based on the page's memory object
-        * (aka "pager") and paging offset.  Moving the page to
-        * another VM object changes its "pager" and "paging_offset"
-        * so it has to be decrypted first, or we would lose the key.
-        *
-        * One exception is VM object collapsing, where we transfer pages
-        * from one backing object to its parent object.  This operation also
-        * transfers the paging information, so the <pager,paging_offset> info
-        * should remain consistent.  The caller (vm_object_do_collapse())
-        * sets "encrypted_ok" in this case.
-        */
-       if (!encrypted_ok && mem->encrypted) {
-               panic("vm_page_rename: page %p is encrypted\n", mem);
+               for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+                    mem != VM_PAGE_NULL;
+                    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m))) {
+#if 0
+                       /*
+                        * we don't hold the page queue lock
+                        * so this check isn't safe to make
+                        */
+                       VM_PAGE_CHECK(mem);
+#endif
+                       if ((mem->vm_page_object == packed_object) && (mem->offset == offset))
+                               break;
+               }
+               lck_spin_unlock(bucket_lock);
+       }
+
+#if DEBUG_VM_PAGE_LOOKUP
+       elapsed = mach_absolute_time() - start;
+
+       if (bucket_lock) {
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
+               OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
+       } else {
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
+               OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
+       }
+       if (mem != VM_PAGE_NULL)
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
+       else
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
+#endif
+       if (mem != VM_PAGE_NULL) {
+               assert(VM_PAGE_OBJECT(mem) == object);
+
+               object->memq_hint = mem;
        }
+       return (mem);
+}
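+
+
vm_page_lookup() above layers two shortcuts in front of the hash: the memq_hint probe (the last page found, then its next and prev neighbors, which is nearly free for sequential access patterns) and, below VM_PAGE_HASH_LOOKUP_THRESHOLD resident pages, a plain walk of the object's memq that skips the bucket spin lock entirely. A hypothetical reduction of the hint probe over a NULL-terminated doubly linked list (the kernel's memq is a circular packed queue):

#include <stddef.h>

struct node {
        struct node     *next;
        struct node     *prev;
        unsigned long   offset;
};

static struct node *
find_with_hint(struct node *head, struct node **hint, unsigned long offset)
{
        struct node *n = *hint;

        if (n != NULL) {
                if (n->offset == offset)
                        return n;                       /* hint hit */
                if (n->next != NULL && n->next->offset == offset)
                        return *hint = n->next;         /* sequential forward */
                if (n->prev != NULL && n->prev->offset == offset)
                        return *hint = n->prev;         /* sequential backward */
        }
        /* slow path: linear walk (vm_page_lookup() would use the hash here) */
        for (n = head; n != NULL; n = n->next) {
                if (n->offset == offset)
                        return *hint = n;
        }
        return NULL;
}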
+
+
+/*
+ *     vm_page_rename:
+ *
+ *     Move the given memory entry from its
+ *     current object to the specified target object/offset.
+ *
+ *     The object must be locked.
+ */
+void
+vm_page_rename(
+       vm_page_t               mem,
+       vm_object_t             new_object,
+       vm_object_offset_t      new_offset)
+{
+       boolean_t       internal_to_external, external_to_internal;
+       vm_tag_t        tag;
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
+
+       assert(m_object != new_object);
+       assert(m_object);
 
         XPR(XPR_VM_PAGE,
                 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
@@ -1496,8 +2176,39 @@ vm_page_rename(
         */
        vm_page_lockspin_queues();
 
+       internal_to_external = FALSE;
+       external_to_internal = FALSE;
+
+       if (mem->vm_page_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
+               /*
+                * it's much easier to get the vm_page_pageable_xxx accounting correct
+                * if we first move the page to the active queue... it's going to end
+                * up there anyway, and we don't do vm_page_rename's frequently enough
+                * for this to matter.
+                */
+               vm_page_queues_remove(mem, FALSE);
+               vm_page_activate(mem);
+       }
+       if (VM_PAGE_PAGEABLE(mem)) {
+               if (m_object->internal && !new_object->internal) {
+                       internal_to_external = TRUE;
+               }
+               if (!m_object->internal && new_object->internal) {
+                       external_to_internal = TRUE;
+               }
+       }
+
+       tag = m_object->wire_tag;
        vm_page_remove(mem, TRUE);
-       vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE, FALSE);
+       vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
+
+       if (internal_to_external) {
+               vm_page_pageable_internal_count--;
+               vm_page_pageable_external_count++;
+       } else if (external_to_internal) {
+               vm_page_pageable_external_count--;
+               vm_page_pageable_internal_count++;
+       }
 
        vm_page_unlock_queues();
 }
@@ -1525,7 +2236,8 @@ vm_page_init(
        }
 #endif
        *mem = vm_page_template;
-       mem->phys_page = phys_page;
+
+       VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
 #if 0
        /*
         * we're leaving this turned off for now... currently pages
@@ -1583,10 +2295,16 @@ vm_page_grab_fictitious(void)
        return vm_page_grab_fictitious_common(vm_page_fictitious_addr);
 }
 
+int vm_guard_count;
+
+
 vm_page_t
 vm_page_grab_guard(void)
 {
-       return vm_page_grab_fictitious_common(vm_page_guard_addr);
+       vm_page_t page;
+       page = vm_page_grab_fictitious_common(vm_page_guard_addr);
+       if (page)
+               OSAddAtomic(1, &vm_guard_count);
+       return page;
 }
 
 
@@ -1599,10 +2317,13 @@ void
 vm_page_release_fictitious(
        vm_page_t m)
 {
-       assert(!m->free);
+       assert((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) || (m->vm_page_q_state == VM_PAGE_IS_WIRED));
        assert(m->fictitious);
-       assert(m->phys_page == vm_page_fictitious_addr ||
-              m->phys_page == vm_page_guard_addr);
+       assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
+              VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
+
+       if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr)
+               OSAddAtomic(-1, &vm_guard_count);
 
        c_vm_page_release_fictitious++;
 
@@ -1667,8 +2388,8 @@ void vm_page_more_fictitious(void)
        }
 
        retval = kernel_memory_allocate(zone_map,
-                                       &addr, PAGE_SIZE, VM_PROT_ALL,
-                                       KMA_KOBJECT|KMA_NOPAGEWAIT);
+                                       &addr, PAGE_SIZE, 0,
+                                       KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE);
        if (retval != KERN_SUCCESS) { 
                /*
                 * No page was available. Drop the
@@ -1679,6 +2400,7 @@ void vm_page_more_fictitious(void)
                vm_page_wait(THREAD_UNINT);
                return;
        }
+
        zcram(vm_page_zone, addr, PAGE_SIZE);
 
        lck_mtx_unlock(&vm_page_alloc_lock);
@@ -1700,12 +2422,134 @@ vm_pool_low(void)
 }
 
 
+#if CONFIG_BACKGROUND_QUEUE
+
+void
+vm_page_update_background_state(vm_page_t mem)
+{
+       if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
+               return;
+
+       if (mem->vm_page_in_background == FALSE)
+               return;
+
+#if BACKGROUNDQ_BASED_ON_QOS
+       if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY)
+               return;
+#else
+       task_t  my_task;
+
+       my_task = current_task();
+       
+       if (my_task) {
+               if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG))
+                       return;
+       }
+#endif
+       vm_page_lockspin_queues();
+
+       mem->vm_page_in_background = FALSE;
+       vm_page_background_promoted_count++;
+
+       vm_page_remove_from_backgroundq(mem);
+
+       vm_page_unlock_queues();
+}
+
+
+void
+vm_page_assign_background_state(vm_page_t mem)
+{
+       if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
+               return;
+
+#if BACKGROUNDQ_BASED_ON_QOS
+       if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY)
+               mem->vm_page_in_background = TRUE;
+       else
+               mem->vm_page_in_background = FALSE;
+#else
+       task_t  my_task;
+
+       my_task = current_task();
+
+       if (my_task)
+               mem->vm_page_in_background = proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG);
+#endif
+}
+
+
+void
+vm_page_remove_from_backgroundq(
+       vm_page_t       mem)
+{
+       vm_object_t     m_object;
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+       if (mem->vm_page_on_backgroundq) {
+               vm_page_queue_remove(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
+
+               mem->vm_page_backgroundq.next = 0;
+               mem->vm_page_backgroundq.prev = 0;
+               mem->vm_page_on_backgroundq = FALSE;
+               
+               vm_page_background_count--;
+
+               m_object = VM_PAGE_OBJECT(mem);
+
+               if (m_object->internal)
+                       vm_page_background_internal_count--;
+               else
+                       vm_page_background_external_count--;
+       } else {
+               assert(VM_PAGE_UNPACK_PTR(mem->vm_page_backgroundq.next) == (uintptr_t)NULL &&
+                      VM_PAGE_UNPACK_PTR(mem->vm_page_backgroundq.prev) == (uintptr_t)NULL);
+       }
+}
+
+
+void
+vm_page_add_to_backgroundq(
+       vm_page_t       mem,
+       boolean_t       first)
+{                              
+       vm_object_t     m_object;
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+       if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
+               return;
+
+       if (mem->vm_page_on_backgroundq == FALSE) {
+
+               m_object = VM_PAGE_OBJECT(mem);
+
+               if (vm_page_background_exclude_external && !m_object->internal)
+                       return;
+
+               if (first == TRUE)
+                       vm_page_queue_enter_first(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
+               else
+                       vm_page_queue_enter(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
+               mem->vm_page_on_backgroundq = TRUE;
+               
+               vm_page_background_count++;
+
+               if (m_object->internal)
+                       vm_page_background_internal_count++;
+               else
+                       vm_page_background_external_count++;
+       }
+}
+
+#endif
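
The two policy hooks above decide background-queue membership from the allocating context: with BACKGROUNDQ_BASED_ON_QOS, a thread running at or below THREAD_QOS_LEGACY tags its pages as background; otherwise the task's DARWIN_BG role decides. A later touch by a non-background task promotes the page off the queue via vm_page_update_background_state(). The predicate, sketched with assumed names:

#include <stdbool.h>

enum qos {
        QOS_MAINTENANCE, QOS_BACKGROUND, QOS_UTILITY,
        QOS_LEGACY, QOS_USER_INITIATED, QOS_USER_INTERACTIVE
};

/* true when the current context should tag its pages "background" */
static bool
allocating_in_background(bool qos_based, enum qos thread_qos, bool task_darwin_bg)
{
        if (qos_based)
                return thread_qos <= QOS_LEGACY;        /* low-QoS thread */
        return task_darwin_bg;                          /* DARWIN_BG task */
}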
 
 /*
  * this is an interface to support bring-up of drivers
  * on platforms with physical memory > 4G...
  */
-int            vm_himemory_mode = 0;
+int            vm_himemory_mode = 2;
 
 
 /*
@@ -1716,7 +2560,7 @@ int               vm_himemory_mode = 0;
 unsigned int   vm_lopages_allocated_q = 0;
 unsigned int   vm_lopages_allocated_cpm_success = 0;
 unsigned int   vm_lopages_allocated_cpm_failed = 0;
-queue_head_t   vm_lopage_queue_free;
+vm_page_queue_head_t   vm_lopage_queue_free __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
 
 vm_page_t
 vm_page_grablo(void)
@@ -1728,12 +2572,14 @@ vm_page_grablo(void)
 
        lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
-        if ( !queue_empty(&vm_lopage_queue_free)) {
-                queue_remove_first(&vm_lopage_queue_free,
+        if ( !vm_page_queue_empty(&vm_lopage_queue_free)) {
+                vm_page_queue_remove_first(&vm_lopage_queue_free,
                                    mem,
                                    vm_page_t,
                                    pageq);
                assert(vm_lopage_free_count);
+               assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
+               mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
 
                 vm_lopage_free_count--;
                vm_lopages_allocated_q++;
@@ -1742,6 +2588,10 @@ vm_page_grablo(void)
                        vm_lopage_refill = TRUE;
 
                lck_mtx_unlock(&vm_page_queue_free_lock);
+
+#if CONFIG_BACKGROUND_QUEUE
+               vm_page_assign_background_state(mem);
+#endif
        } else {
                lck_mtx_unlock(&vm_page_queue_free_lock);
 
@@ -1753,6 +2603,8 @@ vm_page_grablo(void)
 
                        return (VM_PAGE_NULL);
                }
+               assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+
                mem->busy = TRUE;
 
                vm_page_lockspin_queues();
@@ -1765,13 +2617,11 @@ vm_page_grablo(void)
                vm_page_unlock_queues();
        }
        assert(mem->busy);
-       assert(!mem->free);
        assert(!mem->pmapped);
        assert(!mem->wpmapped);
-       assert(!pmap_is_noencrypt(mem->phys_page));
+       assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
 
-       mem->pageq.next = NULL;
-       mem->pageq.prev = NULL;
+       VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
 
        return (mem);
 }
@@ -1798,41 +2648,57 @@ vm_page_grablo(void)
  *     request from the per-cpu queue.
  */
 
-#define COLOR_GROUPS_TO_STEAL  4
+#if CONFIG_SECLUDED_MEMORY
+vm_page_t vm_page_grab_secluded(void);
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+vm_page_t
+vm_page_grab(void)
+{
+       return vm_page_grab_options(0);
+}
 
+#if HIBERNATION
+boolean_t       hibernate_rebuild_needed = FALSE;
+#endif /* HIBERNATION */
 
 vm_page_t
-vm_page_grab( void )
+vm_page_grab_options(
+       int grab_options)
 {
        vm_page_t       mem;
 
-
        disable_preemption();
 
        if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) {
 return_page_from_cpu_list:
+               assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
+
+#if HIBERNATION
+               if (hibernate_rebuild_needed) {
+                       panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
+               }
+#endif /* HIBERNATION */
                PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
-               PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
-               mem->pageq.next = NULL;
+               PROCESSOR_DATA(current_processor(), free_pages) = mem->snext;
 
                enable_preemption();
+               VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+               mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
 
-               assert(mem->listq.next == NULL && mem->listq.prev == NULL);
+               assert(mem->listq.next == 0 && mem->listq.prev == 0);
                assert(mem->tabled == FALSE);
-               assert(mem->object == VM_OBJECT_NULL);
+               assert(mem->vm_page_object == 0);
                assert(!mem->laundry);
-               assert(!mem->free);
-               assert(pmap_verify_free(mem->phys_page));
+               assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
                assert(mem->busy);
-               assert(!mem->encrypted);
                assert(!mem->pmapped);
                assert(!mem->wpmapped);
-               assert(!mem->active);
-               assert(!mem->inactive);
-               assert(!mem->throttled);
-               assert(!mem->speculative);
-               assert(!pmap_is_noencrypt(mem->phys_page));
+               assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
 
+#if CONFIG_BACKGROUND_QUEUE
+               vm_page_assign_background_state(mem);
+#endif
                return mem;
        }
        enable_preemption();
@@ -1842,18 +2708,18 @@ return_page_from_cpu_list:
         *      Optionally produce warnings if the wire or gobble
         *      counts exceed some threshold.
         */
-       if (vm_page_wire_count_warning > 0
-           && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+       if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
                printf("mk: vm_page_grab(): high wired page count of %d\n",
                        vm_page_wire_count);
-               assert(vm_page_wire_count < vm_page_wire_count_warning);
        }
-       if (vm_page_gobble_count_warning > 0
-           && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+       if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
                printf("mk: vm_page_grab(): high gobbled page count of %d\n",
                        vm_page_gobble_count);
-               assert(vm_page_gobble_count < vm_page_gobble_count_warning);
        }
+#endif
 
        lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
@@ -1863,14 +2729,37 @@ return_page_from_cpu_list:
         */
        if ((vm_page_free_count < vm_page_free_reserved) &&
            !(current_thread()->options & TH_OPT_VMPRIV)) {
+               /* no page for us in the free queue... */
                lck_mtx_unlock(&vm_page_queue_free_lock);
                mem = VM_PAGE_NULL;
+
+#if CONFIG_SECLUDED_MEMORY
+               /* ... but can we try and grab from the secluded queue? */
+               if (vm_page_secluded_count > 0 &&
+                   ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
+                    task_can_use_secluded_mem(current_task()))) {
+                       mem = vm_page_grab_secluded();
+                       if (grab_options & VM_PAGE_GRAB_SECLUDED) {
+                               vm_page_secluded.grab_for_iokit++;
+                               if (mem) {
+                                       vm_page_secluded.grab_for_iokit_success++;
+                               }
+                       }
+                       if (mem) {
+                               VM_CHECK_MEMORYSTATUS;
+                               return mem;
+                       }
+               }
+#else /* CONFIG_SECLUDED_MEMORY */
+               (void) grab_options;
+#endif /* CONFIG_SECLUDED_MEMORY */
        }
        else {
               vm_page_t        head;
               vm_page_t        tail;
               unsigned int     pages_to_steal;
               unsigned int     color;
+              unsigned int     clump_end, sub_count;
 
               while ( vm_page_free_count == 0 ) {
 
@@ -1899,58 +2788,87 @@ return_page_from_cpu_list:
                if (vm_page_free_count <= vm_page_free_reserved)
                        pages_to_steal = 1;
                else {
-                       pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors;
-               
-                       if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved))
+                       if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+                               pages_to_steal = vm_free_magazine_refill_limit;
+                       else
                                pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
                }
                color = PROCESSOR_DATA(current_processor(), start_color);
                head = tail = NULL;
 
+               vm_page_free_count -= pages_to_steal;
+               clump_end = sub_count = 0;
+
                while (pages_to_steal--) {
-                       if (--vm_page_free_count < vm_page_free_count_minimum)
-                               vm_page_free_count_minimum = vm_page_free_count;
 
-                       while (queue_empty(&vm_page_queue_free[color]))
+                       while (vm_page_queue_empty(&vm_page_queue_free[color].qhead))
                                color = (color + 1) & vm_color_mask;
-               
-                       queue_remove_first(&vm_page_queue_free[color],
-                                          mem,
-                                          vm_page_t,
-                                          pageq);
-                       mem->pageq.next = NULL;
-                       mem->pageq.prev = NULL;
-
-                       assert(!mem->active);
-                       assert(!mem->inactive);
-                       assert(!mem->throttled);
-                       assert(!mem->speculative);                      
+#if defined(__x86_64__)
+                       vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
+                                                             mem,
+                                                             vm_page_t,
+                                                             pageq,
+                                                             clump_end);
+#else
+                       vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
+                                                             mem,
+                                                             vm_page_t,
+                                                             pageq);
+#endif
+
+                       assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_Q);
+
+                       VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
 
+#if defined(__arm__) || defined(__arm64__)
                        color = (color + 1) & vm_color_mask;
+#else
+
+#if DEVELOPMENT || DEBUG
+
+                       sub_count++;
+                       if (clump_end) {
+                               vm_clump_update_stats(sub_count);
+                               sub_count = 0;
+                               color = (color + 1) & vm_color_mask;
+                       }
+#else
+                       if (clump_end)
+                               color = (color + 1) & vm_color_mask;
+
+#endif /* if DEVELOPMENT || DEBUG */
+
+#endif  /* if defined(__arm__) || defined(__arm64__) */
 
                        if (head == NULL)
                                head = mem;
                        else
-                               tail->pageq.next = (queue_t)mem;
+                               tail->snext = mem;
                        tail = mem;
 
-                       mem->pageq.prev = NULL;
-                       assert(mem->listq.next == NULL && mem->listq.prev == NULL);
+                       assert(mem->listq.next == 0 && mem->listq.prev == 0);
                        assert(mem->tabled == FALSE);
-                       assert(mem->object == VM_OBJECT_NULL);
+                       assert(mem->vm_page_object == 0);
                        assert(!mem->laundry);
-                       assert(mem->free);
-                       mem->free = FALSE;
 
-                       assert(pmap_verify_free(mem->phys_page));
+                       mem->vm_page_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
+
+                       assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
                        assert(mem->busy);
-                       assert(!mem->free);
-                       assert(!mem->encrypted);
                        assert(!mem->pmapped);
                        assert(!mem->wpmapped);
-                       assert(!pmap_is_noencrypt(mem->phys_page));
+                       assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
+               }
+#if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
+               vm_clump_update_stats(sub_count);
+#endif
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+
+#if HIBERNATION
+               if (hibernate_rebuild_needed) {
+                       panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
                }
-               PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
+#endif /* HIBERNATION */
+               PROCESSOR_DATA(current_processor(), free_pages) = head->snext;
                PROCESSOR_DATA(current_processor(), start_color) = color;
 
                /*
@@ -1958,9 +2876,10 @@ return_page_from_cpu_list:
                 */
                PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
                mem = head;
-               mem->pageq.next = NULL;
+               assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
 
-               lck_mtx_unlock(&vm_page_queue_free_lock);
+               VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+               mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
 
                enable_preemption();
        }
@@ -1980,11 +2899,157 @@ return_page_from_cpu_list:
                 thread_wakeup((event_t) &vm_page_free_wanted);
 
        VM_CHECK_MEMORYSTATUS;
-       
-//     dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);      /* (TEST/DEBUG) */
+
+       if (mem) {
+//             dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);  /* (TEST/DEBUG) */
+
+#if CONFIG_BACKGROUND_QUEUE
+               vm_page_assign_background_state(mem);
+#endif
+       }
+       return mem;
+}
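
vm_page_grab_options() is shaped like a magazine allocator: the fast path pops the current processor's free_pages chain with preemption disabled and no lock taken, and only an empty chain falls back to vm_page_queue_free_lock to steal up to vm_free_magazine_refill_limit pages from the global color queues. A single-threaded sketch of that pop/refill shape; the struct and function names are hypothetical:

#include <stddef.h>

struct page {
        struct page     *snext;         /* singly linked free chain */
};

struct cpu_cache {
        struct page     *free_pages;    /* per-CPU chain, private to its owner */
};

static struct page *
cache_grab(struct cpu_cache *c, struct page *(*refill_batch)(void))
{
        struct page *p = c->free_pages;

        if (p == NULL) {
                /* slow path: refill_batch() stands in for stealing a chain
                 * from the global queues under the free-list lock */
                p = refill_batch();
                if (p == NULL)
                        return NULL;    /* out of pages; caller must wait */
        }
        c->free_pages = p->snext;       /* pop the head without a lock */
        p->snext = NULL;
        return p;
}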
+
+#if CONFIG_SECLUDED_MEMORY
+vm_page_t
+vm_page_grab_secluded(void)
+{
+       vm_page_t       mem;
+       vm_object_t     object;
+       int             refmod_state;
+
+       if (vm_page_secluded_count == 0) {
+               /* no secluded pages to grab... */
+               return VM_PAGE_NULL;
+       }
+
+       /* secluded queue is protected by the VM page queue lock */
+       vm_page_lock_queues();
+
+       if (vm_page_secluded_count == 0) {
+               /* no secluded pages to grab... */
+               vm_page_unlock_queues();
+               return VM_PAGE_NULL;
+       }
+
+#if 00
+       /* can we grab from the secluded queue? */
+       if (vm_page_secluded_count > vm_page_secluded_target ||
+           (vm_page_secluded_count > 0 &&
+            task_can_use_secluded_mem(current_task()))) {
+               /* OK */
+       } else {
+               /* can't grab from secluded queue... */
+               vm_page_unlock_queues();
+               return VM_PAGE_NULL;
+       }
+#endif
+
+       /* we can grab a page from secluded queue! */
+       assert((vm_page_secluded_count_free +
+               vm_page_secluded_count_inuse) ==
+              vm_page_secluded_count);
+       if (current_task()->task_can_use_secluded_mem) {
+               assert(num_tasks_can_use_secluded_mem > 0);
+       }
+       assert(!vm_page_queue_empty(&vm_page_queue_secluded));
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
+       assert(mem->vm_page_q_state == VM_PAGE_ON_SECLUDED_Q);
+       vm_page_queues_remove(mem, TRUE);
+
+       object = VM_PAGE_OBJECT(mem);
+
+       assert(!mem->fictitious);
+       assert(!VM_PAGE_WIRED(mem));
+       if (object == VM_OBJECT_NULL) {
+               /* free for grab! */
+               vm_page_unlock_queues();
+               vm_page_secluded.grab_success_free++;
+
+               assert(mem->busy);
+               assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+               assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
+               assert(mem->pageq.next == 0);
+               assert(mem->pageq.prev == 0);
+               assert(mem->listq.next == 0);
+               assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+               assert(mem->vm_page_on_backgroundq == 0);
+               assert(mem->vm_page_backgroundq.next == 0);
+               assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
+               return mem;
+       }
+
+       assert(!object->internal);
+//     vm_page_pageable_external_count--;
+
+       if (!vm_object_lock_try(object)) {
+//             printf("SECLUDED: page %p: object %p locked\n", mem, object);
+               vm_page_secluded.grab_failure_locked++;
+       reactivate_secluded_page:
+               vm_page_activate(mem);
+               vm_page_unlock_queues();
+               return VM_PAGE_NULL;
+       }
+       if (mem->busy ||
+           mem->cleaning ||
+           mem->laundry) {
+               /* can't steal page in this state... */
+               vm_object_unlock(object);
+               vm_page_secluded.grab_failure_state++;
+               goto reactivate_secluded_page;
+       }
+
+       mem->busy = TRUE;
+       refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
+       if (refmod_state & VM_MEM_REFERENCED) {
+               mem->reference = TRUE;
+       }
+       if (refmod_state & VM_MEM_MODIFIED) {
+               SET_PAGE_DIRTY(mem, FALSE);
+       }
+       if (mem->dirty || mem->precious) {
+               /* can't grab a dirty page; re-activate */
+//             printf("SECLUDED: dirty page %p\n", mem);
+               PAGE_WAKEUP_DONE(mem);
+               vm_page_secluded.grab_failure_dirty++;
+               vm_object_unlock(object);
+               goto reactivate_secluded_page;
+       }
+       if (mem->reference) {
+               /* it's been used but we do need to grab a page... */
+       }
+
+       vm_page_unlock_queues();
+
+       /* finish what vm_page_free() would have done... */
+       vm_page_free_prepare_object(mem, TRUE);
+       vm_object_unlock(object);
+       object = VM_OBJECT_NULL;
+       if (vm_page_free_verify) {
+               assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
+       }
+       pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
+       vm_page_secluded.grab_success_other++;
+
+       assert(mem->busy);
+       assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+       assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
+       assert(mem->pageq.next == 0);
+       assert(mem->pageq.prev == 0);
+       assert(mem->listq.next == 0);
+       assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+       assert(mem->vm_page_on_backgroundq == 0);
+       assert(mem->vm_page_backgroundq.next == 0);
+       assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
 
        return mem;
 }
+#endif /* CONFIG_SECLUDED_MEMORY */
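
vm_page_grab_secluded() only repurposes a secluded page when its object cooperates: a failed object try-lock, a busy/cleaning/laundry page, or a dirty/precious page each bump a distinct vm_page_secluded failure counter and reactivate the page instead. The gate, reduced to a predicate with an assumed page_state struct:

#include <stdbool.h>

struct page_state {
        bool    busy, cleaning, laundry, dirty, precious;
};

static bool
can_steal_secluded(bool object_lock_acquired, const struct page_state *s)
{
        if (!object_lock_acquired)
                return false;                   /* grab_failure_locked */
        if (s->busy || s->cleaning || s->laundry)
                return false;                   /* grab_failure_state */
        if (s->dirty || s->precious)
                return false;                   /* grab_failure_dirty */
        return true;                            /* grab_success_other */
}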
 
 /*
  *     vm_page_release:
@@ -1994,62 +3059,112 @@ return_page_from_cpu_list:
 
 void
 vm_page_release(
-       register vm_page_t      mem)
+       vm_page_t       mem,
+       boolean_t       page_queues_locked)
 {
        unsigned int    color;
        int     need_wakeup = 0;
        int     need_priv_wakeup = 0;
+#if CONFIG_SECLUDED_MEMORY
+       int     need_secluded_wakeup = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
 
+       if (page_queues_locked) {
+               LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       } else {
+               LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+       }
 
        assert(!mem->private && !mem->fictitious);
        if (vm_page_free_verify) {
-               assert(pmap_verify_free(mem->phys_page));
+               assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
        }
-//     dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);      /* (TEST/DEBUG) */
+//     dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);  /* (TEST/DEBUG) */
 
-       pmap_clear_noencrypt(mem->phys_page);
+       pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
 
        lck_mtx_lock_spin(&vm_page_queue_free_lock);
-#if DEBUG
-       if (mem->free)
-               panic("vm_page_release");
-#endif
 
+       assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
        assert(mem->busy);
        assert(!mem->laundry);
-       assert(mem->object == VM_OBJECT_NULL);
-       assert(mem->pageq.next == NULL &&
-              mem->pageq.prev == NULL);
-       assert(mem->listq.next == NULL &&
-              mem->listq.prev == NULL);
-       
+       assert(mem->vm_page_object == 0);
+       assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+       assert(mem->listq.next == 0 && mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+       assert(mem->vm_page_backgroundq.next == 0 &&
+              mem->vm_page_backgroundq.prev == 0 &&
+              mem->vm_page_on_backgroundq == FALSE);
+#endif 
        if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
            vm_lopage_free_count < vm_lopage_free_limit &&
-           mem->phys_page < max_valid_low_ppnum) {
+           VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
                /*
                 * this exists to support hardware controllers
                 * incapable of generating DMAs with more than 32 bits
                 * of address on platforms with physical memory > 4G...
                 */
-               queue_enter_first(&vm_lopage_queue_free,
-                                 mem,
-                                 vm_page_t,
-                                 pageq);
+               vm_page_queue_enter_first(&vm_lopage_queue_free,
+                                         mem,
+                                         vm_page_t,
+                                         pageq);
                vm_lopage_free_count++;
 
                if (vm_lopage_free_count >= vm_lopage_free_limit)
                        vm_lopage_refill = FALSE;
 
+               mem->vm_page_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
                mem->lopage = TRUE;
-       } else {          
+#if CONFIG_SECLUDED_MEMORY
+       } else if (vm_page_free_count > vm_page_free_reserved &&
+                  vm_page_secluded_count < vm_page_secluded_target &&
+                  num_tasks_can_use_secluded_mem == 0) {
+               /*
+                * XXX FBDP TODO: also avoid refilling secluded queue
+                * when some IOKit objects are already grabbing from it...
+                */
+               if (!page_queues_locked) {
+                       if (!vm_page_trylock_queues()) {
+                               /* take locks in right order */
+                               lck_mtx_unlock(&vm_page_queue_free_lock);
+                               vm_page_lock_queues();
+                               lck_mtx_lock_spin(&vm_page_queue_free_lock);
+                       }
+               }
                mem->lopage = FALSE;
-               mem->free = TRUE;
-
-               color = mem->phys_page & vm_color_mask;
-               queue_enter_first(&vm_page_queue_free[color],
-                                 mem,
-                                 vm_page_t,
-                                 pageq);
+               LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+               vm_page_queue_enter_first(&vm_page_queue_secluded,
+                                         mem,
+                                         vm_page_t,
+                                         pageq);
+               mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+               vm_page_secluded_count++;
+               vm_page_secluded_count_free++;
+               if (!page_queues_locked) {
+                       vm_page_unlock_queues();
+               }
+               LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
+               if (vm_page_free_wanted_secluded > 0) {
+                       vm_page_free_wanted_secluded--;
+                       need_secluded_wakeup = 1;
+               }
+#endif /* CONFIG_SECLUDED_MEMORY */
+       } else {
+               mem->lopage = FALSE;
+               mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+               color = VM_PAGE_GET_COLOR(mem);
+#if defined(__x86_64__)
+               vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead,
+                                         mem,
+                                         vm_page_t,
+                                         pageq);
+#else
+               vm_page_queue_enter(&vm_page_queue_free[color].qhead,
+                                         mem,
+                                         vm_page_t,
+                                         pageq);
+#endif
                vm_page_free_count++;
                /*
                 *      Check if we should wake up someone waiting for page.
@@ -2075,6 +3190,12 @@ vm_page_release(
                if (vm_page_free_wanted_privileged > 0) {
                        vm_page_free_wanted_privileged--;
                        need_priv_wakeup = 1;
+#if CONFIG_SECLUDED_MEMORY
+               } else if (vm_page_free_wanted_secluded > 0 &&
+                          vm_page_free_count > vm_page_free_reserved) {
+                       vm_page_free_wanted_secluded--;
+                       need_secluded_wakeup = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
                } else if (vm_page_free_wanted > 0 &&
                           vm_page_free_count > vm_page_free_reserved) {
                        vm_page_free_wanted--;
@@ -2085,12 +3206,58 @@ vm_page_release(
 
        if (need_priv_wakeup)
                thread_wakeup_one((event_t) &vm_page_free_wanted_privileged);
+#if CONFIG_SECLUDED_MEMORY
+       else if (need_secluded_wakeup)
+               thread_wakeup_one((event_t) &vm_page_free_wanted_secluded);
+#endif /* CONFIG_SECLUDED_MEMORY */
        else if (need_wakeup)
                thread_wakeup_one((event_t) &vm_page_free_count);
 
        VM_CHECK_MEMORYSTATUS;
 }
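
Release-side routing in vm_page_release() mirrors the grab side: a freed page lands on the lopage queue while that pool is refilling and the page is addressable by 32-bit DMA engines, else (under CONFIG_SECLUDED_MEMORY) on the secluded queue while it is below target, else on its color's global free queue. A simplified routing function; the real code also checks vm_page_free_reserved, num_tasks_can_use_secluded_mem and mem->lopage:

enum free_queue { FREEQ_LOPAGE, FREEQ_SECLUDED, FREEQ_COLOR };

static enum free_queue
release_route(unsigned int ppnum, unsigned int max_low_ppnum,
    int lopage_refill, unsigned int lopage_free, unsigned int lopage_limit,
    unsigned int secluded_count, unsigned int secluded_target)
{
        if (lopage_refill && lopage_free < lopage_limit &&
            ppnum < max_low_ppnum)
                return FREEQ_LOPAGE;    /* keep 32-bit DMA engines supplied */
        if (secluded_count < secluded_target)
                return FREEQ_SECLUDED;  /* refill the secluded pool */
        return FREEQ_COLOR;             /* normal per-color free queue */
}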
 
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released
+ * for the first time. Hence, no locking is needed and no
+ * unnecessary checks are made.
+ * Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
+ */
+void
+vm_page_release_startup(
+       vm_page_t       mem)
+{
+       vm_page_queue_t queue_free;
+
+       if (vm_lopage_free_count < vm_lopage_free_limit &&
+           VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
+               mem->lopage = TRUE;
+               mem->vm_page_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
+               vm_lopage_free_count++;
+               queue_free = &vm_lopage_queue_free;
+#if CONFIG_SECLUDED_MEMORY
+       } else if (vm_page_secluded_count < vm_page_secluded_target) {
+               mem->lopage = FALSE;
+               mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+               vm_page_secluded_count++;
+               vm_page_secluded_count_free++;
+               queue_free = &vm_page_queue_secluded;
+#endif /* CONFIG_SECLUDED_MEMORY */
+       } else {
+               mem->lopage = FALSE;
+               mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+               vm_page_free_count++;
+               queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
+       }
+       if (mem->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
+#if defined(__x86_64__)
+               vm_page_queue_enter_clump(queue_free, mem, vm_page_t, pageq);
+#else
+               vm_page_queue_enter(queue_free, mem, vm_page_t, pageq);
+#endif
+       } else
+               vm_page_queue_enter_first(queue_free, mem, vm_page_t, pageq);
+}
+
 /*
  *     vm_page_wait:
  *
@@ -2123,31 +3290,63 @@ vm_page_wait(
                lck_mtx_unlock(&vm_page_queue_free_lock);
                return TRUE;
        }
-       if (vm_page_free_count < vm_page_free_target) {
 
-               if (is_privileged) {
-                       if (vm_page_free_wanted_privileged++ == 0)
-                               need_wakeup = 1;
-                       wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, interruptible);
-               } else {
-                       if (vm_page_free_wanted++ == 0)
-                               need_wakeup = 1;
-                       wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
-               }
+       if (vm_page_free_count >= vm_page_free_target) {
                lck_mtx_unlock(&vm_page_queue_free_lock);
-               counter(c_vm_page_wait_block++);
+               return TRUE;
+       }
 
-               if (need_wakeup)
-                       thread_wakeup((event_t)&vm_page_free_wanted);
-
-               if (wait_result == THREAD_WAITING)
-                       wait_result = thread_block(THREAD_CONTINUE_NULL);
-
-               return(wait_result == THREAD_AWAKENED);
+       if (is_privileged) {
+               if (vm_page_free_wanted_privileged++ == 0)
+                       need_wakeup = 1;
+               wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, interruptible);
+#if CONFIG_SECLUDED_MEMORY
+       } else if (secluded_for_apps &&
+                  task_can_use_secluded_mem(current_task())) {
+#if 00
+               /* XXX FBDP: need pageq lock for this... */
+               /* XXX FBDP: might wait even if pages available, */
+               /* XXX FBDP: hopefully not for too long... */
+               if (vm_page_secluded_count > 0) {
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+                       return TRUE;
+               }
+#endif
+               if (vm_page_free_wanted_secluded++ == 0) {
+                       need_wakeup = 1;
+               }
+               wait_result = assert_wait(
+                       (event_t)&vm_page_free_wanted_secluded,
+                       interruptible);
+#endif /* CONFIG_SECLUDED_MEMORY */
        } else {
-               lck_mtx_unlock(&vm_page_queue_free_lock);
-               return TRUE;
+               if (vm_page_free_wanted++ == 0)
+                       need_wakeup = 1;
+               wait_result = assert_wait((event_t)&vm_page_free_count,
+                                         interruptible);
+       }
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+       counter(c_vm_page_wait_block++);
+
+       if (need_wakeup)
+               thread_wakeup((event_t)&vm_page_free_wanted);
+
+       if (wait_result == THREAD_WAITING) {
+               VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
+                              vm_page_free_wanted_privileged,
+                              vm_page_free_wanted,
+#if CONFIG_SECLUDED_MEMORY
+                              vm_page_free_wanted_secluded,
+#else /* CONFIG_SECLUDED_MEMORY */
+                              0,
+#endif /* CONFIG_SECLUDED_MEMORY */
+                              0);
+               wait_result = thread_block(THREAD_CONTINUE_NULL);
+               VM_DEBUG_EVENT(vm_page_wait_block,
+                              VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
        }
+
+       return (wait_result == THREAD_AWAKENED);
 }
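
vm_page_wait() parks the three waiter classes on distinct events so that vm_page_release() can wake the most privileged class first: TH_OPT_VMPRIV threads, then secluded-capable tasks, then ordinary threads; the first waiter on any channel also pokes the pageout daemon through vm_page_free_wanted. The channel choice, sketched with assumed names:

enum wait_channel { WAIT_PRIVILEGED, WAIT_SECLUDED, WAIT_NORMAL };

static enum wait_channel
page_wait_channel(int thread_is_vmpriv, int task_can_use_secluded)
{
        if (thread_is_vmpriv)
                return WAIT_PRIVILEGED; /* woken first by vm_page_release() */
        if (task_can_use_secluded)
                return WAIT_SECLUDED;   /* woken once free count clears reserved */
        return WAIT_NORMAL;
}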
 
 /*
@@ -2164,27 +3363,17 @@ vm_page_alloc(
        vm_object_t             object,
        vm_object_offset_t      offset)
 {
-       register vm_page_t      mem;
-
-       vm_object_lock_assert_exclusive(object);
-       mem = vm_page_grab();
-       if (mem == VM_PAGE_NULL)
-               return VM_PAGE_NULL;
-
-       vm_page_insert(mem, object, offset);
-
-       return(mem);
-}
-
-vm_page_t
-vm_page_alloclo(
-       vm_object_t             object,
-       vm_object_offset_t      offset)
-{
-       register vm_page_t      mem;
+       vm_page_t       mem;
+       int             grab_options;
 
        vm_object_lock_assert_exclusive(object);
-       mem = vm_page_grablo();
+       grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+       if (object->can_grab_secluded) {
+               grab_options |= VM_PAGE_GRAB_SECLUDED;
+       }
+#endif /* CONFIG_SECLUDED_MEMORY */
+       mem = vm_page_grab_options(grab_options);
        if (mem == VM_PAGE_NULL)
                return VM_PAGE_NULL;
 
@@ -2193,7 +3382,6 @@ vm_page_alloclo(
        return(mem);
 }
 
-
 /*
  *     vm_page_alloc_guard:
  *     
@@ -2207,7 +3395,7 @@ vm_page_alloc_guard(
        vm_object_t             object,
        vm_object_offset_t      offset)
 {
-       register vm_page_t      mem;
+       vm_page_t       mem;
 
        vm_object_lock_assert_exclusive(object);
        mem = vm_page_grab_guard();
@@ -2243,16 +3431,17 @@ void
 vm_page_free_prepare_queues(
        vm_page_t       mem)
 {
+       vm_object_t     m_object;
+
        VM_PAGE_CHECK(mem);
-       assert(!mem->free);
+
+       assert(mem->vm_page_q_state != VM_PAGE_ON_FREE_Q);
        assert(!mem->cleaning);
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-       if (mem->free)
-               panic("vm_page_free: freeing page on free list\n");
-#endif
-       if (mem->object) {
-               vm_object_lock_assert_exclusive(mem->object);
+       m_object = VM_PAGE_OBJECT(mem);
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       if (m_object) {
+               vm_object_lock_assert_exclusive(m_object);
        }
        if (mem->laundry) {
                /*
@@ -2265,24 +3454,55 @@ vm_page_free_prepare_queues(
                vm_pageout_steal_laundry(mem, TRUE);
                counter(++c_laundry_pages_freed);
        }
-               
-       VM_PAGE_QUEUES_REMOVE(mem);     /* clears local/active/inactive/throttled/speculative */
+       
+       vm_page_queues_remove(mem, TRUE);
 
        if (VM_PAGE_WIRED(mem)) {
-               if (mem->object) {
-                       assert(mem->object->wired_page_count > 0);
-                       mem->object->wired_page_count--;
-                       assert(mem->object->resident_page_count >=
-                              mem->object->wired_page_count);
+               assert(mem->wire_count > 0);
 
-                       if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+               if (m_object) {
+
+                       VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+                       VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
+                       VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
+
+                       assert(m_object->resident_page_count >=
+                              m_object->wired_page_count);
+
+                       if (m_object->purgable == VM_PURGABLE_VOLATILE) {
                                OSAddAtomic(+1, &vm_page_purgeable_count);
                                assert(vm_page_purgeable_wired_count > 0);
                                OSAddAtomic(-1, &vm_page_purgeable_wired_count);
                        }
+                       if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
+                            m_object->purgable == VM_PURGABLE_EMPTY) &&
+                           m_object->vo_purgeable_owner != TASK_NULL) {
+                               task_t owner;
+
+                               owner = m_object->vo_purgeable_owner;
+                               /*
+                                * While wired, this page was accounted
+                                * as "non-volatile" but it should now
+                                * be accounted as "volatile".
+                                */
+                               /* one less "non-volatile"... */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.purgeable_nonvolatile,
+                                            PAGE_SIZE);
+                               /* ... and "phys_footprint" */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.phys_footprint,
+                                            PAGE_SIZE);
+                               /* one more "volatile" */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.purgeable_volatile,
+                                             PAGE_SIZE);
+                       }
                }
                if (!mem->private && !mem->fictitious)
                        vm_page_wire_count--;
+
+               mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
                mem->wire_count = 0;
                assert(!mem->gobbled);
        } else if (mem->gobbled) {
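
The wired path above ends with a three-way ledger move: while wired, a page of a
volatile purgeable object is billed to its owner as "non-volatile" and counted in
phys_footprint; freeing it reverts the accounting to plain "volatile". A minimal
user-space model of that move (toy struct and function names, not the kernel's
ledger_credit()/ledger_debit() interface):

#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096

struct toy_ledger {
        int64_t purgeable_volatile;
        int64_t purgeable_nonvolatile;
        int64_t phys_footprint;
};

/* Freeing a wired page of a volatile object: one less "non-volatile"
 * page, less footprint, one more "volatile" page. */
static void
toy_free_wired_volatile_page(struct toy_ledger *l)
{
        l->purgeable_nonvolatile -= TOY_PAGE_SIZE;
        l->phys_footprint        -= TOY_PAGE_SIZE;
        l->purgeable_volatile    += TOY_PAGE_SIZE;
}

int
main(void)
{
        struct toy_ledger l = { 0, TOY_PAGE_SIZE, TOY_PAGE_SIZE };

        toy_free_wired_volatile_page(&l);
        assert(l.purgeable_nonvolatile == 0 && l.phys_footprint == 0);
        assert(l.purgeable_volatile == TOY_PAGE_SIZE);
        return 0;
}
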
@@ -2306,10 +3526,19 @@ vm_page_free_prepare_object(
        if (mem->private) {
                mem->private = FALSE;
                mem->fictitious = TRUE;
-               mem->phys_page = vm_page_fictitious_addr;
+               VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
        }
        if ( !mem->fictitious) {
-               vm_page_init(mem, mem->phys_page, mem->lopage);
+               assert(mem->pageq.next == 0);
+               assert(mem->pageq.prev == 0);
+               assert(mem->listq.next == 0);
+               assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+               assert(mem->vm_page_backgroundq.next == 0);
+               assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
+               assert(mem->next_m == 0);
+               vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->lopage);
        }
 }
 
@@ -2331,7 +3560,8 @@ vm_page_free(
        if (mem->fictitious) {
                vm_page_release_fictitious(mem);
        } else {
-               vm_page_release(mem);
+               vm_page_release(mem, TRUE); /* page queues are locked */
        }
 }
 
@@ -2350,7 +3580,7 @@ vm_page_free_unlocked(
        if (mem->fictitious) {
                vm_page_release_fictitious(mem);
        } else {
-               vm_page_release(mem);
+               vm_page_release(mem, FALSE); /* page queues are not locked */
        }
 }
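
vm_page_release() now takes a flag saying whether the caller already holds the
page queues lock, so vm_page_free() (queues locked) and vm_page_free_unlocked()
(queues not locked) share one release path. The idiom in isolation, with a
pthread mutex standing in for vm_page_queue_lock (all names illustrative):

#include <pthread.h>

static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;

/* One release routine; the caller states the lock's current state. */
static void
toy_release(void *page, int queues_locked)
{
        if (!queues_locked)
                pthread_mutex_lock(&queues_lock);
        /* ... return the page to a free queue here ... */
        (void)page;
        if (!queues_locked)
                pthread_mutex_unlock(&queues_lock);
}

toy_release(p, 1) mirrors the vm_page_free() call above; toy_release(p, 0)
mirrors vm_page_free_unlocked().
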
 
@@ -2360,6 +3590,9 @@ vm_page_free_unlocked(
  * as batched up by vm_pageout_scan().
  * The big win is taking the free list lock once per batch
  * instead of once per page.
+ *
+ * The VM page queues lock (vm_page_queue_lock) should NOT be held.
+ * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
  */
 void
 vm_page_free_list(
@@ -2371,6 +3604,9 @@ vm_page_free_list(
        vm_page_t       local_freeq;
        int             pg_count;
 
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+       LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
+
        while (freeq) {
 
                pg_count = 0;
@@ -2385,18 +3621,19 @@ vm_page_free_list(
                 */
                while (mem && pg_count < 64) {
 
-                       assert(!mem->inactive);
-                       assert(!mem->active);
-                       assert(!mem->throttled);
-                       assert(!mem->free);
-                       assert(!mem->speculative);
-                       assert(!VM_PAGE_WIRED(mem));
-                       assert(mem->pageq.prev == NULL);
+                       assert((mem->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
+                              (mem->vm_page_q_state == VM_PAGE_IS_WIRED));
+#if CONFIG_BACKGROUND_QUEUE
+                       assert(mem->vm_page_backgroundq.next == 0 &&
+                              mem->vm_page_backgroundq.prev == 0 &&
+                              mem->vm_page_on_backgroundq == FALSE);
+#endif
+                       nxt = mem->snext;
+                       mem->snext = NULL;
+                       assert(mem->pageq.prev == 0);
 
-                       nxt = (vm_page_t)(mem->pageq.next);
-               
                        if (vm_page_free_verify && !mem->fictitious && !mem->private) {
-                               assert(pmap_verify_free(mem->phys_page));
+                               assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
                        }
                        if (prepare_object == TRUE)
                                vm_page_free_prepare_object(mem, TRUE);
@@ -2406,9 +3643,14 @@ vm_page_free_list(
 
                                if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
                                    vm_lopage_free_count < vm_lopage_free_limit &&
-                                   mem->phys_page < max_valid_low_ppnum) {
-                                       mem->pageq.next = NULL;
-                                       vm_page_release(mem);
+                                   VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
+                                       vm_page_release(mem, FALSE); /* page queues are not locked */
+#if CONFIG_SECLUDED_MEMORY
+                               } else if (vm_page_secluded_count < vm_page_secluded_target &&
+                                          num_tasks_can_use_secluded_mem == 0) {
+                                       vm_page_release(mem,
+                                                       FALSE); /* page queues are not locked */
+#endif /* CONFIG_SECLUDED_MEMORY */
                                } else {
                                        /*
                                         * IMPORTANT: we can't set the page "free" here
@@ -2419,15 +3661,15 @@ vm_page_free_list(
                                         * cause trouble because the page is not actually
                                         * in the free queue yet...
                                         */
-                                       mem->pageq.next = (queue_entry_t)local_freeq;
+                                       mem->snext = local_freeq;
                                        local_freeq = mem;
                                        pg_count++;
 
-                                       pmap_clear_noencrypt(mem->phys_page);
+                                       pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
                                }
                        } else {
-                               assert(mem->phys_page == vm_page_fictitious_addr ||
-                                      mem->phys_page == vm_page_guard_addr);
+                               assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
+                                      VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
                                vm_page_release_fictitious(mem);
                        }
                        mem = nxt;
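
The inner loop above gathers up to 64 pages on a local list with no lock held;
the next hunk then takes vm_page_queue_free_lock once to enqueue the whole
batch. The same two-phase batching idiom, reduced to a standalone sketch (toy
node type, a pthread mutex in place of the kernel lock):

#include <pthread.h>
#include <stddef.h>

struct toy_node { struct toy_node *snext; };

static pthread_mutex_t  toy_free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_node *toy_global_free_list;

static void
toy_batched_free(struct toy_node *freeq)
{
        while (freeq) {
                struct toy_node *local_freeq = NULL;
                int pg_count = 0;

                /* Phase 1: no lock held; unlink up to 64 nodes. */
                while (freeq && pg_count < 64) {
                        struct toy_node *nxt = freeq->snext;

                        freeq->snext = local_freeq;
                        local_freeq = freeq;
                        pg_count++;
                        freeq = nxt;
                }
                /* Phase 2: one lock round-trip per batch, not per node. */
                if (local_freeq) {
                        pthread_mutex_lock(&toy_free_lock);
                        while (local_freeq) {
                                struct toy_node *nxt = local_freeq->snext;

                                local_freeq->snext = toy_global_free_list;
                                toy_global_free_list = local_freeq;
                                local_freeq = nxt;
                        }
                        pthread_mutex_unlock(&toy_free_lock);
                }
        }
}
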
@@ -2438,23 +3680,34 @@ vm_page_free_list(
                        unsigned int    avail_free_count;
                        unsigned int    need_wakeup = 0;
                        unsigned int    need_priv_wakeup = 0;
+#if CONFIG_SECLUDED_MEMORY
+                       unsigned int    need_wakeup_secluded = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
          
                        lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
                        while (mem) {
                                int     color;
 
-                               nxt = (vm_page_t)(mem->pageq.next);
+                               nxt = mem->snext;
 
-                               assert(!mem->free);
+                               assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
                                assert(mem->busy);
-                               mem->free = TRUE;
-
-                               color = mem->phys_page & vm_color_mask;
-                               queue_enter_first(&vm_page_queue_free[color],
-                                                 mem,
-                                                 vm_page_t,
-                                                 pageq);
+                               mem->lopage = FALSE;
+                               mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+                               color = VM_PAGE_GET_COLOR(mem);
+#if defined(__x86_64__)
+                               vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead,
+                                                         mem,
+                                                         vm_page_t,
+                                                         pageq);
+#else
+                               vm_page_queue_enter(&vm_page_queue_free[color].qhead,
+                                                   mem,
+                                                   vm_page_t,
+                                                   pageq);
+#endif
                                mem = nxt;
                        }
                        vm_page_free_count += pg_count;
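
Each page lands on a free queue selected by its color, the low bits of its
physical page number, so pages handed out consecutively tend to fall in
different processor cache sets; on x86_64, vm_page_queue_enter_clump()
additionally keeps clumps of physically contiguous pages together. The coloring
alone, as a standalone sketch (queue count is illustrative; xnu sizes vm_colors
at boot):

#define TOY_COLORS      8
#define TOY_COLOR_MASK  (TOY_COLORS - 1)

struct toy_page { struct toy_page *snext; unsigned int phys_page; };

static struct toy_page *toy_free_queue[TOY_COLORS];

static void
toy_free_enqueue(struct toy_page *p)
{
        /* Low bits of the physical page number pick the queue. */
        unsigned int color = p->phys_page & TOY_COLOR_MASK;

        p->snext = toy_free_queue[color];
        toy_free_queue[color] = p;
}
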
@@ -2468,10 +3721,31 @@ vm_page_free_list(
                                        avail_free_count = 0;
                                } else {
                                        need_priv_wakeup = vm_page_free_wanted_privileged;
-                                       vm_page_free_wanted_privileged = 0;
                                        avail_free_count -= vm_page_free_wanted_privileged;
+                                       vm_page_free_wanted_privileged = 0;
+                               }
+                       }
+#if CONFIG_SECLUDED_MEMORY
+                       if (vm_page_free_wanted_secluded > 0 &&
+                           avail_free_count > vm_page_free_reserved) {
+                               unsigned int available_pages;
+                               available_pages = (avail_free_count -
+                                                  vm_page_free_reserved);
+                               if (available_pages <
+                                   vm_page_free_wanted_secluded) {
+                                       need_wakeup_secluded = available_pages;
+                                       vm_page_free_wanted_secluded -=
+                                               available_pages;
+                                       avail_free_count -= available_pages;
+                               } else {
+                                       need_wakeup_secluded =
+                                               vm_page_free_wanted_secluded;
+                                       avail_free_count -=
+                                               vm_page_free_wanted_secluded;
+                                       vm_page_free_wanted_secluded = 0;
                                }
                        }
+#endif /* CONFIG_SECLUDED_MEMORY */
                        if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
                                unsigned int  available_pages;
 
@@ -2495,6 +3769,21 @@ vm_page_free_list(
                                 */
                                thread_wakeup((event_t)&vm_page_free_wanted_privileged);
                        }
+#if CONFIG_SECLUDED_MEMORY
+                       if (need_wakeup_secluded != 0 &&
+                           vm_page_free_wanted_secluded == 0) {
+                               thread_wakeup((event_t)
+                                             &vm_page_free_wanted_secluded);
+                       } else {
+                               for (;
+                                    need_wakeup_secluded != 0;
+                                    need_wakeup_secluded--) {
+                                       thread_wakeup_one(
+                                               (event_t)
+                                               &vm_page_free_wanted_secluded);
+                               }
+                       }
+#endif /* CONFIG_SECLUDED_MEMORY */
                        if (need_wakeup != 0 && vm_page_free_wanted == 0) {
                                /*
                                 * We don't expect to have any more waiters
@@ -2524,16 +3813,23 @@ vm_page_free_list(
  *
  *     The page's object and the page queues must be locked.
  */
+
+
 void
 vm_page_wire(
-       register vm_page_t      mem)
+       vm_page_t mem,
+       vm_tag_t           tag,
+       boolean_t          check_memorystatus)
 {
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
 
-//     dbgLog(current_thread(), mem->offset, mem->object, 1);  /* (TEST/DEBUG) */
+//     dbgLog(current_thread(), mem->offset, m_object, 1);     /* (TEST/DEBUG) */
 
        VM_PAGE_CHECK(mem);
-       if (mem->object) {
-               vm_object_lock_assert_exclusive(mem->object);
+       if (m_object) {
+               vm_object_lock_assert_exclusive(m_object);
        } else {
                /*
                 * In theory, the page should be in an object before it
@@ -2545,27 +3841,50 @@ vm_page_wire(
                 * that page and update it at the same time.
                 */
        }
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        if ( !VM_PAGE_WIRED(mem)) {
 
-               if (mem->pageout_queue) {
-                       mem->pageout = FALSE;
-                       vm_pageout_throttle_up(mem);
-               }
-               VM_PAGE_QUEUES_REMOVE(mem);
+               if (mem->laundry)
+                       vm_pageout_steal_laundry(mem, TRUE);
+
+               vm_page_queues_remove(mem, TRUE);
+
+               assert(mem->wire_count == 0);
+               mem->vm_page_q_state = VM_PAGE_IS_WIRED;
+
+               if (m_object) {
 
-               if (mem->object) {
-                       mem->object->wired_page_count++;
-                       assert(mem->object->resident_page_count >=
-                              mem->object->wired_page_count);
-                       if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+                       VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+                       VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
+                       VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
+
+                       assert(m_object->resident_page_count >=
+                              m_object->wired_page_count);
+                       if (m_object->purgable == VM_PURGABLE_VOLATILE) {
                                assert(vm_page_purgeable_count > 0);
                                OSAddAtomic(-1, &vm_page_purgeable_count);
                                OSAddAtomic(1, &vm_page_purgeable_wired_count);
                        }
-                       if (mem->object->all_reusable) {
+                       if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
+                            m_object->purgable == VM_PURGABLE_EMPTY) &&
+                           m_object->vo_purgeable_owner != TASK_NULL) {
+                               task_t owner;
+
+                               owner = m_object->vo_purgeable_owner;
+                               /* less volatile bytes */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.purgeable_volatile,
+                                            PAGE_SIZE);
+                               /* more not-quite-volatile bytes */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.purgeable_nonvolatile,
+                                             PAGE_SIZE);
+                               /* more footprint */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.phys_footprint,
+                                             PAGE_SIZE);
+                       }
+                       if (m_object->all_reusable) {
                                /*
                                 * Wired pages are not counted as "re-usable"
                                 * in "all_reusable" VM objects, so nothing
@@ -2577,7 +3896,7 @@ vm_page_wire(
                                 * wired, so adjust its state and the
                                 * accounting.
                                 */
-                               vm_object_reuse_pages(mem->object,
+                               vm_object_reuse_pages(m_object,
                                                      mem->offset,
                                                      mem->offset+PAGE_SIZE_64,
                                                      FALSE);
@@ -2591,49 +3910,19 @@ vm_page_wire(
                        vm_page_gobble_count--;
                mem->gobbled = FALSE;
 
-               VM_CHECK_MEMORYSTATUS;
-               
-               /* 
-                * ENCRYPTED SWAP:
-                * The page could be encrypted, but
-                * We don't have to decrypt it here
-                * because we don't guarantee that the
-                * data is actually valid at this point.
-                * The page will get decrypted in
-                * vm_fault_wire() if needed.
-                */
+               if (check_memorystatus == TRUE) {
+                       VM_CHECK_MEMORYSTATUS;
+               }
        }
        assert(!mem->gobbled);
+       assert(mem->vm_page_q_state == VM_PAGE_IS_WIRED);
        mem->wire_count++;
+       if (__improbable(mem->wire_count == 0)) {
+               panic("vm_page_wire(%p): wire_count overflow", mem);
+       }
        VM_PAGE_CHECK(mem);
 }
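
vm_page_wire() now panics if the wire count wraps: wire_count is a narrow
unsigned bit-field, so the only observable symptom of overflow is the counter
reading zero right after the increment. The check in isolation (stand-in
counter type, user-space abort() in place of panic()):

#include <stdio.h>
#include <stdlib.h>

static unsigned short toy_wire_count;   /* stand-in for the bit-field */

static void
toy_wire(void)
{
        if (++toy_wire_count == 0) {    /* wrapped: 0xffff + 1 == 0 */
                fprintf(stderr, "wire_count overflow\n");
                abort();                /* the kernel would panic() */
        }
}
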
 
-/*
- *      vm_page_gobble:
- *
- *      Mark this page as consumed by the vm/ipc/xmm subsystems.
- *
- *      Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
- */
-void
-vm_page_gobble(
-        register vm_page_t      mem)
-{
-        vm_page_lockspin_queues();
-        VM_PAGE_CHECK(mem);
-
-       assert(!mem->gobbled);
-       assert( !VM_PAGE_WIRED(mem));
-
-        if (!mem->gobbled && !VM_PAGE_WIRED(mem)) {
-                if (!mem->private && !mem->fictitious)
-                        vm_page_wire_count++;
-        }
-       vm_page_gobble_count++;
-        mem->gobbled = TRUE;
-        vm_page_unlock_queues();
-}
-
 /*
  *     vm_page_unwire:
  *
@@ -2647,34 +3936,61 @@ vm_page_unwire(
        vm_page_t       mem,
        boolean_t       queueit)
 {
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
 
-//     dbgLog(current_thread(), mem->offset, mem->object, 0);  /* (TEST/DEBUG) */
+//     dbgLog(current_thread(), mem->offset, m_object, 0);     /* (TEST/DEBUG) */
 
        VM_PAGE_CHECK(mem);
        assert(VM_PAGE_WIRED(mem));
-       assert(mem->object != VM_OBJECT_NULL);
-#if DEBUG
-       vm_object_lock_assert_exclusive(mem->object);
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+       assert(mem->wire_count > 0);
+       assert(!mem->gobbled);
+       assert(m_object != VM_OBJECT_NULL);
+       vm_object_lock_assert_exclusive(m_object);
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        if (--mem->wire_count == 0) {
-               assert(!mem->private && !mem->fictitious);
-               vm_page_wire_count--;
-               assert(mem->object->wired_page_count > 0);
-               mem->object->wired_page_count--;
-               assert(mem->object->resident_page_count >=
-                      mem->object->wired_page_count);
-               if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+
+               mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+
+               VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+               VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
+               VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
+               if (!mem->private && !mem->fictitious) {
+                       vm_page_wire_count--;
+               }
+
+               assert(m_object->resident_page_count >=
+                      m_object->wired_page_count);
+               if (m_object->purgable == VM_PURGABLE_VOLATILE) {
                        OSAddAtomic(+1, &vm_page_purgeable_count);
                        assert(vm_page_purgeable_wired_count > 0);
                        OSAddAtomic(-1, &vm_page_purgeable_wired_count);
                }
-               assert(!mem->laundry);
-               assert(mem->object != kernel_object);
-               assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
+               if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
+                    m_object->purgable == VM_PURGABLE_EMPTY) &&
+                   m_object->vo_purgeable_owner != TASK_NULL) {
+                       task_t owner;
+
+                       owner = m_object->vo_purgeable_owner;
+                       /* more volatile bytes */
+                       ledger_credit(owner->ledger,
+                                     task_ledgers.purgeable_volatile,
+                                     PAGE_SIZE);
+                       /* less not-quite-volatile bytes */
+                       ledger_debit(owner->ledger,
+                                    task_ledgers.purgeable_nonvolatile,
+                                    PAGE_SIZE);
+                       /* less footprint */
+                       ledger_debit(owner->ledger,
+                                    task_ledgers.phys_footprint,
+                                    PAGE_SIZE);
+               }
+               assert(m_object != kernel_object);
+               assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
 
                if (queueit == TRUE) {
-                       if (mem->object->purgable == VM_PURGABLE_EMPTY) {
+                       if (m_object->purgable == VM_PURGABLE_EMPTY) {
                                vm_page_deactivate(mem);
                        } else {
                                vm_page_activate(mem);
@@ -2709,15 +4025,16 @@ vm_page_deactivate_internal(
        vm_page_t       m,
        boolean_t       clear_hw_reference)
 {
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(m);
 
        VM_PAGE_CHECK(m);
-       assert(m->object != kernel_object);
-       assert(m->phys_page != vm_page_guard_addr);
+       assert(m_object != kernel_object);
+       assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
 
-//     dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6);        /* (TEST/DEBUG) */
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+//     dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);    /* (TEST/DEBUG) */
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        /*
         *      This page is no longer very interesting.  If it was
         *      interesting (active or inactive/referenced), then we
@@ -2737,40 +4054,44 @@ vm_page_deactivate_internal(
        }
        /*
         * if this page is currently on the pageout queue, we can't do the
-        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
         * and we can't remove it manually since we would need the object lock
         * (which is not required here) to decrement the activity_in_progress
         * reference which is held on the object while the page is in the pageout queue...
         * just let the normal laundry processing proceed
         */
-       if (m->pageout_queue || m->private || m->fictitious || (VM_PAGE_WIRED(m)))
-               return;
-
+       if (m->laundry || m->private || m->fictitious ||
+           (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+           (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
+           VM_PAGE_WIRED(m)) {
+               return;
+       }
        if (!m->absent && clear_hw_reference == TRUE)
-               pmap_clear_reference(m->phys_page);
+               pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
 
        m->reference = FALSE;
        m->no_cache = FALSE;
 
-       if (!m->inactive) {
-               VM_PAGE_QUEUES_REMOVE(m);
-
-               if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
-                   m->dirty && m->object->internal &&
-                   (m->object->purgable == VM_PURGABLE_DENY ||
-                    m->object->purgable == VM_PURGABLE_NONVOLATILE ||
-                    m->object->purgable == VM_PURGABLE_VOLATILE)) {
-                       queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
-                       m->throttled = TRUE;
+       if ( !VM_PAGE_INACTIVE(m)) {
+               vm_page_queues_remove(m, FALSE);
+
+               if (!VM_DYNAMIC_PAGING_ENABLED() &&
+                   m->dirty && m_object->internal &&
+                   (m_object->purgable == VM_PURGABLE_DENY ||
+                    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
+                    m_object->purgable == VM_PURGABLE_VOLATILE)) {
+                       vm_page_check_pageable_safe(m);
+                       vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+                       m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
                        vm_page_throttled_count++;
                } else {
-                       if (m->object->named && m->object->ref_count == 1) {
+                       if (m_object->named && m_object->ref_count == 1) {
                                vm_page_speculate(m, FALSE);
 #if DEVELOPMENT || DEBUG
                                vm_page_speculative_recreated++;
 #endif
                        } else {
-                               VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+                               vm_page_enqueue_inactive(m, FALSE);
                        }
                }
        }
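
The rewritten tail of vm_page_deactivate_internal() chooses among three
destinations: the throttled queue for dirty internal pages when dynamic paging
is off, the speculative queue for the named, singly-referenced object case, and
the inactive queue otherwise. A simplified predicate for the first decision
(toy types; the real test also examines the object's purgable state):

#include <stdbool.h>

struct toy_page { bool dirty; bool internal; };

enum toy_queue { TOY_Q_THROTTLED, TOY_Q_INACTIVE };

/* A dirty anonymous page cannot be paged out without a pager, so it is
 * parked on the throttled queue instead of aging down the inactive list. */
static enum toy_queue
toy_deactivate_target(const struct toy_page *p, bool dynamic_paging_enabled)
{
        if (!dynamic_paging_enabled && p->dirty && p->internal)
                return TOY_Q_THROTTLED;
        return TOY_Q_INACTIVE;
}
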
@@ -2788,14 +4109,19 @@ vm_page_deactivate_internal(
 
 void vm_page_enqueue_cleaned(vm_page_t m)
 {
-       assert(m->phys_page != vm_page_guard_addr);
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(m);
+
+       assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        assert( !(m->absent && !m->unusual));
 
+       if (VM_PAGE_WIRED(m)) {
+               return;
+       }
+
        if (m->gobbled) {
-               assert( !VM_PAGE_WIRED(m));
                if (!m->private && !m->fictitious)
                        vm_page_wire_count--;
                vm_page_gobble_count--;
@@ -2803,24 +4129,34 @@ void vm_page_enqueue_cleaned(vm_page_t m)
        }
        /*
         * if this page is currently on the pageout queue, we can't do the
-        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
         * and we can't remove it manually since we would need the object lock
         * (which is not required here) to decrement the activity_in_progress
         * reference which is held on the object while the page is in the pageout queue...
         * just let the normal laundry processing proceed
         */
-       if (m->clean_queue || m->pageout_queue || m->private || m->fictitious)
-               return;
-
-       VM_PAGE_QUEUES_REMOVE(m);
+       if (m->laundry || m->private || m->fictitious ||
+           (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
+           (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
+               return;
+       }
+       vm_page_queues_remove(m, FALSE);
 
-       queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
-       m->clean_queue = TRUE;
+       vm_page_check_pageable_safe(m);
+       vm_page_queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
+       m->vm_page_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
        vm_page_cleaned_count++;
 
-       m->inactive = TRUE;
        vm_page_inactive_count++;
-
+       if (m_object->internal) {
+               vm_page_pageable_internal_count++;
+       } else {
+               vm_page_pageable_external_count++;
+       }
+#if CONFIG_BACKGROUND_QUEUE
+       if (m->vm_page_in_background)
+               vm_page_add_to_backgroundq(m, TRUE);
+#endif
        vm_pageout_enqueued_cleaned++;
 }
 
@@ -2834,16 +4170,18 @@ void vm_page_enqueue_cleaned(vm_page_t m)
 
 void
 vm_page_activate(
-       register vm_page_t      m)
+       vm_page_t       m)
 {
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(m);
+
        VM_PAGE_CHECK(m);
 #ifdef FIXME_4778297
-       assert(m->object != kernel_object);
-#endif
-       assert(m->phys_page != vm_page_guard_addr);
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       assert(m_object != kernel_object);
 #endif
+       assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        assert( !(m->absent && !m->unusual));
 
        if (m->gobbled) {
@@ -2855,41 +4193,55 @@ vm_page_activate(
        }
        /*
         * if this page is currently on the pageout queue, we can't do the
-        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
         * and we can't remove it manually since we would need the object lock
         * (which is not required here) to decrement the activity_in_progress
         * reference which is held on the object while the page is in the pageout queue...
         * just let the normal laundry processing proceed
         */
-       if (m->pageout_queue || m->private || m->fictitious)
+       if (m->laundry || m->private || m->fictitious ||
+           (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+           (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q))
                return;
 
 #if DEBUG
-       if (m->active)
+       if (m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q)
                panic("vm_page_activate: already active");
 #endif
 
-       if (m->speculative) {
+       if (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
                DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
                DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
        }
        
-       VM_PAGE_QUEUES_REMOVE(m);
+       vm_page_queues_remove(m, FALSE);
 
        if ( !VM_PAGE_WIRED(m)) {
-
-               if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && 
-                   m->dirty && m->object->internal && 
-                   (m->object->purgable == VM_PURGABLE_DENY ||
-                    m->object->purgable == VM_PURGABLE_NONVOLATILE ||
-                    m->object->purgable == VM_PURGABLE_VOLATILE)) {
-                       queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
-                       m->throttled = TRUE;
+               vm_page_check_pageable_safe(m);
+               if (!VM_DYNAMIC_PAGING_ENABLED() && 
+                   m->dirty && m_object->internal && 
+                   (m_object->purgable == VM_PURGABLE_DENY ||
+                    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
+                    m_object->purgable == VM_PURGABLE_VOLATILE)) {
+                       vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+                       m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
                        vm_page_throttled_count++;
                } else {
-                       queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
-                       m->active = TRUE;
-                       vm_page_active_count++;
+#if CONFIG_SECLUDED_MEMORY
+                       if (secluded_for_filecache &&
+                           vm_page_secluded_target != 0 &&
+                           num_tasks_can_use_secluded_mem == 0 &&
+                           m_object->eligible_for_secluded) {
+                               vm_page_queue_enter(&vm_page_queue_secluded, m,
+                                                   vm_page_t, pageq);
+                               m->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+                               vm_page_secluded_count++;
+                               vm_page_secluded_count_inuse++;
+                               assert(!m_object->internal);
+//                             vm_page_pageable_external_count++;
+                       } else
+#endif /* CONFIG_SECLUDED_MEMORY */
+                       vm_page_enqueue_active(m, FALSE);
                }
                m->reference = TRUE;
                m->no_cache = FALSE;
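
With CONFIG_SECLUDED_MEMORY, activation may divert a page to the secluded queue
instead of the active queue: only file-backed pages qualify, and only while no
task is entitled to consume secluded memory. The gate restated as a standalone
predicate (parameter names are illustrative):

#include <stdbool.h>

static bool
toy_activate_to_secluded(bool filecache_secluded,
                         unsigned int secluded_target,
                         unsigned int entitled_tasks,
                         bool object_eligible,
                         bool object_internal)
{
        return filecache_secluded &&
               secluded_target != 0 &&
               entitled_tasks == 0 &&
               object_eligible &&
               !object_internal;  /* anonymous memory never goes secluded */
}
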
@@ -2911,27 +4263,32 @@ vm_page_speculate(
        boolean_t       new)
 {
         struct vm_speculative_age_q    *aq;
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(m);
 
        VM_PAGE_CHECK(m);
-       assert(m->object != kernel_object);
-       assert(m->phys_page != vm_page_guard_addr);
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+       vm_page_check_pageable_safe(m);
+
+       assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        assert( !(m->absent && !m->unusual));
+       assert(m_object->internal == FALSE);
 
        /*
         * if this page is currently on the pageout queue, we can't do the
-        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
         * and we can't remove it manually since we would need the object lock
         * (which is not required here) to decrement the activity_in_progress
         * reference which is held on the object while the page is in the pageout queue...
         * just let the normal laundry processing proceed
         */
-       if (m->pageout_queue || m->private || m->fictitious)
+       if (m->laundry || m->private || m->fictitious ||
+           (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+           (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q))
                return;
 
-       VM_PAGE_QUEUES_REMOVE(m);               
+       vm_page_queues_remove(m, FALSE);
 
        if ( !VM_PAGE_WIRED(m)) {
                mach_timespec_t         ts;
@@ -2973,7 +4330,7 @@ vm_page_speculate(
                                }
                                aq = &vm_page_queue_speculative[speculative_age_index];
 
-                               if (!queue_empty(&aq->age_q))
+                               if (!vm_page_queue_empty(&aq->age_q))
                                        vm_page_speculate_ageit(aq);
 
                                aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
@@ -2982,14 +4339,15 @@ vm_page_speculate(
                                ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
                        }
                }
-               enqueue_tail(&aq->age_q, &m->pageq);
-               m->speculative = TRUE;
+               vm_page_enqueue_tail(&aq->age_q, &m->pageq);
+               m->vm_page_q_state = VM_PAGE_ON_SPECULATIVE_Q;
                vm_page_speculative_count++;
+               vm_page_pageable_external_count++;
 
                if (new == TRUE) {
-                       vm_object_lock_assert_exclusive(m->object);
+                       vm_object_lock_assert_exclusive(m_object);
 
-                       m->object->pages_created++;
+                       m_object->pages_created++;
 #if DEVELOPMENT || DEBUG
                        vm_page_speculative_created++;
 #endif
@@ -3013,28 +4371,28 @@ vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
 
        sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
 
-       if (queue_empty(&sq->age_q)) {
+       if (vm_page_queue_empty(&sq->age_q)) {
                sq->age_q.next = aq->age_q.next;
                sq->age_q.prev = aq->age_q.prev;
                
-               t = (vm_page_t)sq->age_q.next;
-               t->pageq.prev = &sq->age_q;
+               t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
+               t->pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
 
-               t = (vm_page_t)sq->age_q.prev;
-               t->pageq.next = &sq->age_q;
+               t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
+               t->pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
        } else {
-               t = (vm_page_t)sq->age_q.prev;
+               t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
                t->pageq.next = aq->age_q.next;
                                                
-               t = (vm_page_t)aq->age_q.next;
+               t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
                t->pageq.prev = sq->age_q.prev;
 
-               t = (vm_page_t)aq->age_q.prev;
-               t->pageq.next = &sq->age_q;
+               t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
+               t->pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
 
                sq->age_q.prev = aq->age_q.prev;
        }
-       queue_init(&aq->age_q);
+       vm_page_queue_init(&aq->age_q);
 }
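
vm_page_speculate_ageit() now splices queues through VM_PAGE_PACK_PTR() and
VM_PAGE_UNPACK_PTR(), which fit 64-bit queue pointers into 32 bits by encoding
them as shifted offsets from a known base (compare vm_min_kernel_and_kext_address
and vm_packed_pointer_shift earlier in this file). A self-contained sketch of
that encoding with made-up constants, assuming an LP64 host:

#include <assert.h>
#include <stdint.h>

#define TOY_BASE  0xffffff8000000000ULL  /* illustrative kernel base   */
#define TOY_SHIFT 6                      /* 64-byte aligned structures */

static uint32_t
toy_pack(uintptr_t p)
{
        if (p == 0)
                return 0;
        return (uint32_t)((p - TOY_BASE) >> TOY_SHIFT);
}

static uintptr_t
toy_unpack(uint32_t packed)
{
        if (packed == 0)
                return 0;
        return ((uintptr_t)packed << TOY_SHIFT) + TOY_BASE;
}

int
main(void)
{
        uintptr_t p = TOY_BASE + (123456ULL << TOY_SHIFT);

        assert(toy_unpack(toy_pack(p)) == p);
        return 0;
}
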
 
 
@@ -3043,28 +4401,29 @@ vm_page_lru(
        vm_page_t       m)
 {
        VM_PAGE_CHECK(m);
-       assert(m->object != kernel_object);
-       assert(m->phys_page != vm_page_guard_addr);
+       assert(VM_PAGE_OBJECT(m) != kernel_object);
+       assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
 
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        /*
         * if this page is currently on the pageout queue, we can't do the
-        * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
         * and we can't remove it manually since we would need the object lock
         * (which is not required here) to decrement the activity_in_progress
         * reference which is held on the object while the page is in the pageout queue...
         * just let the normal laundry processing proceed
         */
-       if (m->pageout_queue || m->private || (VM_PAGE_WIRED(m)))
+       if (m->laundry || m->private ||
+           (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+           (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
+           VM_PAGE_WIRED(m))
                return;
 
        m->no_cache = FALSE;
 
-       VM_PAGE_QUEUES_REMOVE(m);
+       vm_page_queues_remove(m, FALSE);
 
-       VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+       vm_page_enqueue_inactive(m, FALSE);
 }
 
 
@@ -3075,29 +4434,39 @@ vm_page_reactivate_all_throttled(void)
        vm_page_t       first_active;
        vm_page_t       m;
        int             extra_active_count;
+       int             extra_internal_count, extra_external_count;
+       vm_object_t     m_object;
 
-       if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default))
+       if (!VM_DYNAMIC_PAGING_ENABLED())
                return;
 
        extra_active_count = 0;
+       extra_internal_count = 0;
+       extra_external_count = 0;
        vm_page_lock_queues();
-       if (! queue_empty(&vm_page_queue_throttled)) {
+       if (! vm_page_queue_empty(&vm_page_queue_throttled)) {
                /*
                 * Switch "throttled" pages to "active".
                 */
-               queue_iterate(&vm_page_queue_throttled, m, vm_page_t, pageq) {
+               vm_page_queue_iterate(&vm_page_queue_throttled, m, vm_page_t, pageq) {
                        VM_PAGE_CHECK(m);
-                       assert(m->throttled);
-                       assert(!m->active);
-                       assert(!m->inactive);
-                       assert(!m->speculative);
-                       assert(!VM_PAGE_WIRED(m));
+                       assert(m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q);
+
+                       m_object = VM_PAGE_OBJECT(m);
 
                        extra_active_count++;
+                       if (m_object->internal) {
+                               extra_internal_count++;
+                       } else {
+                               extra_external_count++;
+                       }
 
-                       m->throttled = FALSE;
-                       m->active = TRUE;
+                       m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
                        VM_PAGE_CHECK(m);
+#if CONFIG_BACKGROUND_QUEUE
+                       if (m->vm_page_in_background)
+                               vm_page_add_to_backgroundq(m, FALSE);
+#endif
                }
 
                /*
@@ -3106,30 +4475,32 @@ vm_page_reactivate_all_throttled(void)
                 * get re-evaluated by the LRU algorithm first, since they've been
                 * completely out of it until now.
                 */
-               first_throttled = (vm_page_t) queue_first(&vm_page_queue_throttled);
-               last_throttled = (vm_page_t) queue_last(&vm_page_queue_throttled);
-               first_active = (vm_page_t) queue_first(&vm_page_queue_active);
-               if (queue_empty(&vm_page_queue_active)) {
-                       queue_last(&vm_page_queue_active) = (queue_entry_t) last_throttled;
+               first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
+               last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
+               first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+               if (vm_page_queue_empty(&vm_page_queue_active)) {
+                       vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
                } else {
-                       queue_prev(&first_active->pageq) = (queue_entry_t) last_throttled;
+                       first_active->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
                }
-               queue_first(&vm_page_queue_active) = (queue_entry_t) first_throttled;
-               queue_prev(&first_throttled->pageq) = (queue_entry_t) &vm_page_queue_active;
-               queue_next(&last_throttled->pageq) = (queue_entry_t) first_active;
+               vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
+               first_throttled->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
+               last_throttled->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
 
 #if DEBUG
                printf("reactivated %d throttled pages\n", vm_page_throttled_count);
 #endif
-               queue_init(&vm_page_queue_throttled);
+               vm_page_queue_init(&vm_page_queue_throttled);
                /*
                 * Adjust the global page counts.
                 */
                vm_page_active_count += extra_active_count;
+               vm_page_pageable_internal_count += extra_internal_count;
+               vm_page_pageable_external_count += extra_external_count;
                vm_page_throttled_count = 0;
        }
        assert(vm_page_throttled_count == 0);
-       assert(queue_empty(&vm_page_queue_throttled));
+       assert(vm_page_queue_empty(&vm_page_queue_throttled));
        vm_page_unlock_queues();
 }
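
The block above moves the entire throttled queue to the head of the active
queue with a constant number of pointer updates rather than re-queueing page by
page; landing at the head means the LRU algorithm re-evaluates these pages
first. The same O(1) splice on ordinary (unpacked) circular doubly linked lists
with sentinel heads:

struct toy_qent { struct toy_qent *next, *prev; };

/* Splice every element of src onto the head of dst; both are circular
 * lists whose head node is a sentinel, as with vm_page_queue_active. */
static void
toy_splice_at_head(struct toy_qent *dst, struct toy_qent *src)
{
        struct toy_qent *first, *last;

        if (src->next == src)           /* source empty: nothing to do */
                return;
        first = src->next;
        last = src->prev;

        last->next = dst->next;         /* old head now follows the sublist */
        dst->next->prev = last;
        dst->next = first;              /* sublist becomes the new head */
        first->prev = dst;

        src->next = src->prev = src;    /* reinitialize the source queue */
}
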
 
@@ -3168,26 +4539,24 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
                /*
                 * Switch "local" pages to "active".
                 */
-               assert(!queue_empty(&lq->vpl_queue));
+               assert(!vm_page_queue_empty(&lq->vpl_queue));
 
-               queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
+               vm_page_queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
                        VM_PAGE_CHECK(m);
-                       assert(m->local);
-                       assert(!m->active);
-                       assert(!m->inactive);
-                       assert(!m->speculative);
-                       assert(!VM_PAGE_WIRED(m));
-                       assert(!m->throttled);
+                       vm_page_check_pageable_safe(m);
+                       assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
                        assert(!m->fictitious);
 
                        if (m->local_id != lid)
                                panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
                        
                        m->local_id = 0;
-                       m->local = FALSE;
-                       m->active = TRUE;
+                       m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
                        VM_PAGE_CHECK(m);
-
+#if CONFIG_BACKGROUND_QUEUE
+                       if (m->vm_page_in_background)
+                               vm_page_add_to_backgroundq(m, FALSE);
+#endif
                        count++;
                }
                if (count != lq->vpl_count)
@@ -3196,27 +4565,31 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
                /*
                 * Transfer the entire local queue to a regular LRU page queues.
                 */
-               first_local = (vm_page_t) queue_first(&lq->vpl_queue);
-               last_local = (vm_page_t) queue_last(&lq->vpl_queue);
-               first_active = (vm_page_t) queue_first(&vm_page_queue_active);
+               first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
+               last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
+               first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
 
-               if (queue_empty(&vm_page_queue_active)) {
-                       queue_last(&vm_page_queue_active) = (queue_entry_t) last_local;
+               if (vm_page_queue_empty(&vm_page_queue_active)) {
+                       vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
                } else {
-                       queue_prev(&first_active->pageq) = (queue_entry_t) last_local;
+                       first_active->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
                }
-               queue_first(&vm_page_queue_active) = (queue_entry_t) first_local;
-               queue_prev(&first_local->pageq) = (queue_entry_t) &vm_page_queue_active;
-               queue_next(&last_local->pageq) = (queue_entry_t) first_active;
+               vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
+               first_local->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
+               last_local->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
 
-               queue_init(&lq->vpl_queue);
+               vm_page_queue_init(&lq->vpl_queue);
                /*
                 * Adjust the global page counts.
                 */
                vm_page_active_count += lq->vpl_count;
+               vm_page_pageable_internal_count += lq->vpl_internal_count;
+               vm_page_pageable_external_count += lq->vpl_external_count;
                lq->vpl_count = 0;
+               lq->vpl_internal_count = 0;
+               lq->vpl_external_count = 0;
        }
-       assert(queue_empty(&lq->vpl_queue));
+       assert(vm_page_queue_empty(&lq->vpl_queue));
 
        if (nolocks == FALSE) {
                VPL_UNLOCK(&lq->vpl_lock);
@@ -3229,13 +4602,13 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
  *
  *     Zero-fill a part of the page.
  */
+#define PMAP_ZERO_PART_PAGE_IMPLEMENTED
 void
 vm_page_part_zero_fill(
        vm_page_t       m,
        vm_offset_t     m_pa,
        vm_size_t       len)
 {
-       vm_page_t       tmp;
 
 #if 0
        /*
@@ -3246,8 +4619,9 @@ vm_page_part_zero_fill(
 #endif
 
 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
-       pmap_zero_part_page(m->phys_page, m_pa, len);
+       pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
 #else
+       vm_page_t       tmp;
        while (1) {
                        tmp = vm_page_grab();
                if (tmp == VM_PAGE_NULL) {
@@ -3280,8 +4654,8 @@ vm_page_zero_fill(
        vm_page_t       m)
 {
         XPR(XPR_VM_PAGE,
-                "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
-                m->object, m->offset, m, 0,0);
+           "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
+           VM_PAGE_OBJECT(m), m->offset, m, 0,0);
 #if 0
        /*
         * we don't hold the page queue lock
@@ -3290,8 +4664,8 @@ vm_page_zero_fill(
        VM_PAGE_CHECK(m);
 #endif
 
-//     dbgTrace(0xAEAEAEAE, m->phys_page, 0);          /* (BRINGUP) */
-       pmap_zero_page(m->phys_page);
+//     dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);              /* (BRINGUP) */
+       pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
 }
 
 /*
@@ -3316,18 +4690,14 @@ vm_page_part_copy(
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dst_m);
 #endif
-       pmap_copy_part_page(src_m->phys_page, src_pa,
-                       dst_m->phys_page, dst_pa, len);
+       pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
+                           VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
 }
 
 /*
  *     vm_page_copy:
  *
  *     Copy one page to another
- *
- * ENCRYPTED SWAP:
- * The source page should not be encrypted.  The caller should
- * make sure the page is decrypted first, if necessary.
  */
 
 int vm_page_copy_cs_validations = 0;
@@ -3338,11 +4708,15 @@ vm_page_copy(
        vm_page_t       src_m,
        vm_page_t       dest_m)
 {
+       vm_object_t     src_m_object;
+
+       src_m_object = VM_PAGE_OBJECT(src_m);
+
         XPR(XPR_VM_PAGE,
-        "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
-        src_m->object, src_m->offset, 
-       dest_m->object, dest_m->offset,
-       0);
+           "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
+           src_m_object, src_m->offset, 
+           VM_PAGE_OBJECT(dest_m), dest_m->offset,
+           0);
 #if 0
        /*
         * we don't hold the page queue lock
@@ -3351,21 +4725,10 @@ vm_page_copy(
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dest_m);
 #endif
-       vm_object_lock_assert_held(src_m->object);
-
-       /*
-        * ENCRYPTED SWAP:
-        * The source page should not be encrypted at this point.
-        * The destination page will therefore not contain encrypted
-        * data after the copy.
-        */
-       if (src_m->encrypted) {
-               panic("vm_page_copy: source page %p is encrypted\n", src_m);
-       }
-       dest_m->encrypted = FALSE;
+       vm_object_lock_assert_held(src_m_object);
 
-       if (src_m->object != VM_OBJECT_NULL &&
-           src_m->object->code_signed) {
+       if (src_m_object != VM_OBJECT_NULL &&
+           src_m_object->code_signed) {
                /*
                 * We're copying a page from a code-signed object.
                 * Whoever ends up mapping the copy page might care about
@@ -3374,6 +4737,14 @@ vm_page_copy(
                 */
                vm_page_copy_cs_validations++;
                vm_page_validate_cs(src_m);
+#if DEVELOPMENT || DEBUG
+               DTRACE_VM4(codesigned_copy,
+                          vm_object_t, src_m_object,
+                          vm_object_offset_t, src_m->offset,
+                          int, src_m->cs_validated,
+                          int, src_m->cs_tainted);
+#endif /* DEVELOPMENT || DEBUG */
+
        }
 
        if (vm_page_is_slideable(src_m)) {
@@ -3396,7 +4767,7 @@ vm_page_copy(
        }
        dest_m->slid = src_m->slid;
        dest_m->error = src_m->error; /* sliding src_m might have failed... */
-       pmap_copy_page(src_m->phys_page, dest_m->phys_page);
+       pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
 }
 
 #if MACH_ASSERT
@@ -3405,52 +4776,48 @@ _vm_page_print(
        vm_page_t       p)
 {
        printf("vm_page %p: \n", p);
-       printf("  pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
-       printf("  listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
-       printf("  next=%p\n", p->next);
-       printf("  object=%p offset=0x%llx\n", p->object, p->offset);
+       printf("  pageq: next=%p prev=%p\n",
+              (vm_page_t)VM_PAGE_UNPACK_PTR(p->pageq.next),
+              (vm_page_t)VM_PAGE_UNPACK_PTR(p->pageq.prev));
+       printf("  listq: next=%p prev=%p\n",
+              (vm_page_t)(VM_PAGE_UNPACK_PTR(p->listq.next)),
+              (vm_page_t)(VM_PAGE_UNPACK_PTR(p->listq.prev)));
+       printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->next_m)));
+       printf("  object=%p offset=0x%llx\n",VM_PAGE_OBJECT(p), p->offset);
        printf("  wire_count=%u\n", p->wire_count);
+       printf("  q_state=%u\n", p->vm_page_q_state);
 
-       printf("  %slocal, %sinactive, %sactive, %spageout_queue, %sspeculative, %slaundry\n",
-              (p->local ? "" : "!"),
-              (p->inactive ? "" : "!"),
-              (p->active ? "" : "!"),
-              (p->pageout_queue ? "" : "!"),
-              (p->speculative ? "" : "!"),
-              (p->laundry ? "" : "!"));
-       printf("  %sfree, %sref, %sgobbled, %sprivate, %sthrottled\n",
-              (p->free ? "" : "!"),
+       printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
+              (p->laundry ? "" : "!"),
               (p->reference ? "" : "!"),
               (p->gobbled ? "" : "!"),
-              (p->private ? "" : "!"),
-              (p->throttled ? "" : "!"));
+              (p->private ? "" : "!"));
        printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
-               (p->busy ? "" : "!"),
-               (p->wanted ? "" : "!"),
-               (p->tabled ? "" : "!"),
-               (p->fictitious ? "" : "!"),
-               (p->pmapped ? "" : "!"),
-               (p->wpmapped ? "" : "!"));
-       printf("  %spageout, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
-              (p->pageout ? "" : "!"),
+              (p->busy ? "" : "!"),
+              (p->wanted ? "" : "!"),
+              (p->tabled ? "" : "!"),
+              (p->fictitious ? "" : "!"),
+              (p->pmapped ? "" : "!"),
+              (p->wpmapped ? "" : "!"));
+       printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
+              (p->free_when_done ? "" : "!"),
               (p->absent ? "" : "!"),
               (p->error ? "" : "!"),
               (p->dirty ? "" : "!"),
               (p->cleaning ? "" : "!"),
               (p->precious ? "" : "!"),
               (p->clustered ? "" : "!"));
-       printf("  %soverwriting, %srestart, %sunusual, %sencrypted, %sencrypted_cleaning\n",
+       printf("  %soverwriting, %srestart, %sunusual\n",
               (p->overwriting ? "" : "!"),
               (p->restart ? "" : "!"),
-              (p->unusual ? "" : "!"),
-              (p->encrypted ? "" : "!"),
-              (p->encrypted_cleaning ? "" : "!"));
-       printf("  %scs_validated, %scs_tainted, %sno_cache\n",
+              (p->unusual ? "" : "!"));
+       printf("  %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n",
               (p->cs_validated ? "" : "!"),
               (p->cs_tainted ? "" : "!"),
+              (p->cs_nx ? "" : "!"),
               (p->no_cache ? "" : "!"));
 
-       printf("phys_page=0x%x\n", p->phys_page);
+       printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
 }
 
 /*
@@ -3462,20 +4829,20 @@ vm_page_verify_contiguous(
        vm_page_t       pages,
        unsigned int    npages)
 {
-       register vm_page_t      m;
+       vm_page_t               m;
        unsigned int            page_count;
        vm_offset_t             prev_addr;
 
-       prev_addr = pages->phys_page;
+       prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
        page_count = 1;
        for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
-               if (m->phys_page != prev_addr + 1) {
+               if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
                        printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
-                              m, (long)prev_addr, m->phys_page);
+                              m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
                        printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
                        panic("vm_page_verify_contiguous:  not contiguous!");
                }
-               prev_addr = m->phys_page;
+               prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
                ++page_count;
        }
        if (page_count != npages) {
@@ -3490,9 +4857,10 @@ vm_page_verify_contiguous(
 /*
  *     Check the free lists for proper length etc.
  */
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
 static unsigned int
 vm_page_verify_free_list(
-       queue_head_t    *vm_page_queue,
+       vm_page_queue_head_t    *vm_page_queue,
        unsigned int    color,
        vm_page_t       look_for_page,
        boolean_t       expect_page)
@@ -3502,30 +4870,38 @@ vm_page_verify_free_list(
        vm_page_t       prev_m;
        boolean_t       found_page;
 
+       if (! vm_page_verify_this_free_list_enabled)
+               return 0;
+
        found_page = FALSE;
        npages = 0;
-       prev_m = (vm_page_t) vm_page_queue;
-       queue_iterate(vm_page_queue,
-                     m,
-                     vm_page_t,
-                     pageq) {
+       prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
+
+       vm_page_queue_iterate(vm_page_queue,
+                             m,
+                             vm_page_t,
+                             pageq) {
 
                if (m == look_for_page) {
                        found_page = TRUE;
                }
-               if ((vm_page_t) m->pageq.prev != prev_m)
+               if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.prev) != prev_m)
                        panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
-                             color, npages, m, m->pageq.prev, prev_m);
+                             color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.prev), prev_m);
                if ( ! m->busy )
                        panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
                              color, npages, m);
                if (color != (unsigned int) -1) {
-                       if ((m->phys_page & vm_color_mask) != color)
+                       if (VM_PAGE_GET_COLOR(m) != color)
                                panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
-                                     color, npages, m, m->phys_page & vm_color_mask, color);
-                       if ( ! m->free )
-                               panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n",
-                                     color, npages, m);
+                                     color, npages, m, VM_PAGE_GET_COLOR(m), color);
+                       if (m->vm_page_q_state != VM_PAGE_ON_FREE_Q)
+                               panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d\n",
+                                     color, npages, m, m->vm_page_q_state);
+               } else {
+                       if (m->vm_page_q_state != VM_PAGE_ON_FREE_LOCAL_Q)
+                               panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d\n",
+                                     npages, m, m->vm_page_q_state);
                }
                ++npages;
                prev_m = m;
@@ -3535,14 +4911,14 @@ vm_page_verify_free_list(
 
                if (expect_page && !found_page) {
                        printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
-                              color, npages, look_for_page, look_for_page->phys_page);
+                              color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
                        _vm_page_print(look_for_page);
                        for (other_color = 0;
                             other_color < vm_colors;
                             other_color++) {
                                if (other_color == color)
                                        continue;
-                               vm_page_verify_free_list(&vm_page_queue_free[other_color],
+                               vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
                                                         other_color, look_for_page, FALSE);
                        }
                        if (color == (unsigned int) -1) {
@@ -3553,27 +4929,41 @@ vm_page_verify_free_list(
                }
                if (!expect_page && found_page) {
                        printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
-                              color, npages, look_for_page, look_for_page->phys_page);
+                              color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
                }
        }
        return npages;
 }
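
The prev-pointer check in vm_page_verify_free_list generalizes to any circular doubly linked queue: while walking forward, each element's back link must point at the element just visited. A minimal user-space sketch of that invariant, with invented names; the kernel version additionally unpacks packed pointers and panics rather than aborting:

#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *next, *prev;
};

static unsigned verify_list(struct node *head)
{
    unsigned npages = 0;
    struct node *prev = head;      /* the queue head acts as a sentinel */
    struct node *n;

    for (n = head->next; n != head; n = n->next) {
        if (n->prev != prev) {
            fprintf(stderr, "corrupted prev ptr %p instead of %p\n",
                    (void *)n->prev, (void *)prev);
            abort();               /* the kernel would panic() here */
        }
        prev = n;
        npages++;
    }
    return npages;
}

int main(void)
{
    struct node head, a, b;
    head.next = &a; a.prev = &head;
    a.next = &b;    b.prev = &a;
    b.next = &head; head.prev = &b;
    printf("verified %u nodes\n", verify_list(&head));
    return 0;
}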
 
-static boolean_t vm_page_verify_free_lists_enabled = FALSE;
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
 static void
 vm_page_verify_free_lists( void )
 {
        unsigned int    color, npages, nlopages;
+       boolean_t       toggle = TRUE;
 
-       if (! vm_page_verify_free_lists_enabled)
+       if (! vm_page_verify_all_free_lists_enabled)
                return;
 
        npages = 0;
 
        lck_mtx_lock(&vm_page_queue_free_lock);
+       
+       if (vm_page_verify_this_free_list_enabled == TRUE) {
+               /*
+                * This variable has been set globally for extra checking of
+                * each free list Q. Since we didn't set it, we don't own it
+                * and we shouldn't toggle it.
+                */
+               toggle = FALSE;
+       }
+
+       if (toggle == TRUE) {
+               vm_page_verify_this_free_list_enabled = TRUE;
+       }
 
        for( color = 0; color < vm_colors; color++ ) {
-               npages += vm_page_verify_free_list(&vm_page_queue_free[color],
+               npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
                                                   color, VM_PAGE_NULL, FALSE);
        }
        nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
@@ -3584,33 +4974,57 @@ vm_page_verify_free_lists( void )
                      "npages %u free_count %d nlopages %u lo_free_count %u",
                      npages, vm_page_free_count, nlopages, vm_lopage_free_count);
 
+       if (toggle == TRUE) {
+               vm_page_verify_this_free_list_enabled = FALSE;
+       }
+
        lck_mtx_unlock(&vm_page_queue_free_lock);
 }
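
The toggle logic above is a "borrow a global debug flag" pattern: enable the per-queue check for the duration of the full scan, but restore it only if we were the ones who turned it on, so an externally set flag is left alone. A sketch of just that pattern, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool this_check_enabled = false;   /* may also be set externally */

static void verify_all(void)
{
    bool toggle = !this_check_enabled;    /* do we own the flag? */

    if (toggle)
        this_check_enabled = true;

    /* ... walk every free list with the extra check enabled ... */
    printf("scanning with per-queue check %s\n",
           this_check_enabled ? "on" : "off");

    if (toggle)
        this_check_enabled = false;       /* restore only what we set */
}

int main(void) { verify_all(); return 0; }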
 
-void
-vm_page_queues_assert(
-       vm_page_t       mem,
-       int             val)
+#endif /* MACH_ASSERT */
+
+
+
+#if __arm64__
+/*
+ *     One or more clients (currently only SEP) may ask for a large contiguous chunk of
+ *     memory once the system has 'aged'. To keep other allocation requests from spoiling
+ *     the chances of such a request being satisfied, we pre-allocate a single contiguous
+ *     10MB buffer and hand it out to the first request of >= 4MB.
+ */
+
+kern_return_t cpm_preallocate_early(void);
+
+vm_page_t cpm_preallocated_pages_list = NULL;
+boolean_t preallocated_buffer_available = FALSE;
+
+#define        PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT                  ((10 * 1024 * 1024) / PAGE_SIZE_64) /* 10 MB */
+#define MIN_CONTIG_PAGES_REQUEST_FOR_PREALLOCATED_BUFFER       ((4 * 1024 * 1024) / PAGE_SIZE_64)   /* 4 MB */
+
+kern_return_t
+cpm_preallocate_early(void)
 {
-#if DEBUG
-       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
-       if (mem->free + mem->active + mem->inactive + mem->speculative +
-           mem->throttled + mem->pageout_queue > (val)) {
-               _vm_page_print(mem);
-               panic("vm_page_queues_assert(%p, %d)\n", mem, val);
-       }
-       if (VM_PAGE_WIRED(mem)) {
-               assert(!mem->active);
-               assert(!mem->inactive);
-               assert(!mem->speculative);
-               assert(!mem->throttled);
-               assert(!mem->pageout_queue);
+
+       kern_return_t   kr = KERN_SUCCESS;
+       vm_map_size_t   prealloc_size = (PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT * PAGE_SIZE_64);
+
+       printf("cpm_preallocate_early called to preallocate contiguous buffer of %llu pages\n", PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT);
+
+       kr = cpm_allocate(CAST_DOWN(vm_size_t, prealloc_size), &cpm_preallocated_pages_list, 0, 0, TRUE, 0);
+
+       if (kr != KERN_SUCCESS) {
+               printf("cpm_allocate for preallocated contig buffer failed with %d.\n", kr);
+       } else {
+               preallocated_buffer_available = TRUE;
        }
+
+       return kr;
 }
-#endif /* MACH_ASSERT */
+#endif /* __arm64__ */
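
A rough user-space analog of this reservation scheme, assuming the sizes in the defines above: grab one large buffer while memory is still unfragmented, give it to the first caller in the qualifying size range, and release the unused tail. malloc/realloc stand in for cpm_allocate and the page-by-page free of the surplus; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define PREALLOC_BYTES  (10u * 1024 * 1024)   /* 10 MB, as above */
#define MIN_QUALIFYING  (4u  * 1024 * 1024)   /* 4 MB, as above  */

static char *prealloc_buf;

static void prealloc_early(void)
{
    prealloc_buf = malloc(PREALLOC_BYTES);    /* done once, at "boot" */
}

static void *alloc_contig(size_t bytes)
{
    if (prealloc_buf && bytes >= MIN_QUALIFYING && bytes <= PREALLOC_BYTES) {
        void *r = prealloc_buf;
        prealloc_buf = NULL;                  /* one-shot: hand it out */
        /* the kernel frees the tail pages individually; realloc is the
         * closest user-space analog to trimming the reservation */
        return realloc(r, bytes);
    }
    return malloc(bytes);                     /* ordinary path */
}

int main(void)
{
    prealloc_early();
    void *sep = alloc_contig(5u * 1024 * 1024);   /* served from reserve */
    printf("5MB request %s the reservation\n", sep ? "consumed" : "missed");
    free(sep);
    return 0;
}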
 
 
+extern boolean_t (* volatile consider_buffer_cache_collect)(int);
+
 /*
  *     CONTIGUOUS PAGE ALLOCATION
  *
@@ -3686,34 +5100,93 @@ vm_page_find_contiguous(
        int             flags)
 {
        vm_page_t       m = NULL;
-       ppnum_t         prevcontaddr;
-       ppnum_t         start_pnum;
-       unsigned int    npages, considered, scanned;
-       unsigned int    page_idx, start_idx, last_idx, orig_last_idx;
+       ppnum_t         prevcontaddr = 0;
+       ppnum_t         start_pnum = 0;
+       unsigned int    npages = 0, considered = 0, scanned = 0;
+       unsigned int    page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
        unsigned int    idx_last_contig_page_found = 0;
-       int             free_considered, free_available;
-       int             substitute_needed;
-       boolean_t       wrapped;
+       int             free_considered = 0, free_available = 0;
+       int             substitute_needed = 0;
+       boolean_t       wrapped, zone_gc_called = FALSE;
+       kern_return_t   kr;
 #if DEBUG
-       clock_sec_t     tv_start_sec, tv_end_sec;
-       clock_usec_t    tv_start_usec, tv_end_usec;
+       clock_sec_t     tv_start_sec = 0, tv_end_sec = 0;
+       clock_usec_t    tv_start_usec = 0, tv_end_usec = 0;
 #endif
-#if MACH_ASSERT
+
        int             yielded = 0;
        int             dumped_run = 0;
        int             stolen_pages = 0;
-#endif
+       int             compressed_pages = 0;
+
 
        if (contig_pages == 0)
                return VM_PAGE_NULL;
 
+full_scan_again:
+
 #if MACH_ASSERT
        vm_page_verify_free_lists();
 #endif
 #if DEBUG
        clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
 #endif
+       PAGE_REPLACEMENT_ALLOWED(TRUE);
+
        vm_page_lock_queues();
+
+#if __arm64__
+       if (preallocated_buffer_available) {
+
+               if ((contig_pages >= MIN_CONTIG_PAGES_REQUEST_FOR_PREALLOCATED_BUFFER) && (contig_pages <= PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT)) {
+
+                       m = cpm_preallocated_pages_list;
+
+                       start_idx = (unsigned int) (m - &vm_pages[0]);
+
+                       if (wire == FALSE) {
+                       
+                               last_idx = start_idx;
+
+                               for(npages = 0; npages < contig_pages; npages++, last_idx++) {
+
+                                       assert(vm_pages[last_idx].gobbled == FALSE);
+
+                                       vm_pages[last_idx].gobbled = TRUE;
+                                       vm_page_gobble_count++;
+
+                                       assert(1 == vm_pages[last_idx].wire_count);
+                                       /*
+                                        * Gobbled pages are counted as wired pages, so there's no need to drop
+                                        * the global wired page count; dropping this page's wire count is enough.
+                                        */
+                                       vm_pages[last_idx].wire_count--;
+                                       vm_pages[last_idx].vm_page_q_state = VM_PAGE_NOT_ON_Q;
+                               }
+                       
+                       }
+
+                       last_idx = start_idx + contig_pages - 1;
+
+                       vm_pages[last_idx].snext = NULL;
+
+                       printf("Using preallocated buffer: requested size (pages): %d... index range: %d-%d... freeing %llu pages\n", contig_pages, start_idx, last_idx, PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT - contig_pages);
+
+                       last_idx += 1;
+                       for(npages = contig_pages; npages < PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT; npages++, last_idx++) {
+
+                               VM_PAGE_ZERO_PAGEQ_ENTRY(&vm_pages[last_idx]);
+                               vm_page_free(&vm_pages[last_idx]);
+                       }
+
+                       cpm_preallocated_pages_list = NULL;
+                       preallocated_buffer_available = FALSE;
+
+                       goto done_scanning;
+               }
+       }
+#endif /* __arm64__ */
+
        lck_mtx_lock(&vm_page_queue_free_lock);
 
        RESET_STATE_OF_RUN();
@@ -3752,20 +5225,19 @@ retry:
                assert(!m->fictitious);
                assert(!m->private);
 
-               if (max_pnum && m->phys_page > max_pnum) {
+               if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
                        /* no more low pages... */
                        break;
                }
-               if (!npages & ((m->phys_page & pnum_mask) != 0)) {
+               if (!npages && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
                        /*
                         * not aligned
                         */
                        RESET_STATE_OF_RUN();
 
                } else if (VM_PAGE_WIRED(m) || m->gobbled ||
-                          m->encrypted || m->encrypted_cleaning || m->cs_validated || m->cs_tainted ||
-                          m->error || m->absent || m->pageout_queue || m->laundry || m->wanted || m->precious ||
-                          m->cleaning || m->overwriting || m->restart || m->unusual || m->pageout) {
+                          m->laundry || m->wanted ||
+                          m->cleaning || m->overwriting || m->free_when_done) {
                        /*
                         * page is in a transient state
                         * or a state we don't want to deal
@@ -3774,9 +5246,15 @@ retry:
                         */
                        RESET_STATE_OF_RUN();
 
-               } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled) {
+               } else if ((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
+                          (m->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
+                          (m->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
+                          (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
                        /*
-                        * page needs to be on one of our queues
+                        * page needs to be on one of our queues (other than the pageout or special free queues)
+                        * or it needs to belong to the compressor pool (which is now indicated
+                        * by vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
+                        * from the check for VM_PAGE_NOT_ON_Q)
                         * in order for it to be stable behind the
                         * locks we hold at this point...
                         * if not, don't consider it which
@@ -3784,7 +5262,7 @@ retry:
                         */
                        RESET_STATE_OF_RUN();
 
-               } else if (!m->free && (!m->tabled || m->busy)) {
+               } else if ((m->vm_page_q_state != VM_PAGE_ON_FREE_Q) && (!m->tabled || m->busy)) {
                        /*
                         * pages on the free list are always 'busy'
                         * so we couldn't test for 'busy' in the check
@@ -3799,22 +5277,22 @@ retry:
                        RESET_STATE_OF_RUN();
 
                } else {
-                       if (m->phys_page != prevcontaddr + 1) {
-                               if ((m->phys_page & pnum_mask) != 0) {
+                       if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
+                               if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
                                        RESET_STATE_OF_RUN();
                                        goto did_consider;
                                } else {
                                        npages = 1;
                                        start_idx = page_idx;
-                                       start_pnum = m->phys_page;
+                                       start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
                                }
                        } else {
                                npages++;
                        }
-                       prevcontaddr = m->phys_page;
+                       prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
                        
                        VM_PAGE_CHECK(m);
-                       if (m->free) {
+                       if (m->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
                                free_considered++;
                        } else {
                                /*
@@ -3827,7 +5305,7 @@ retry:
                                 * into a substitute page.
                                 */
 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
-                               if (m->pmapped || m->dirty) {
+                               if (m->pmapped || m->dirty || m->precious) {
                                        substitute_needed++;
                                }
 #else
@@ -3862,12 +5340,16 @@ retry:
                }
 did_consider:
                if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
-                       
+
+                       PAGE_REPLACEMENT_ALLOWED(FALSE);
+
                        lck_mtx_unlock(&vm_page_queue_free_lock);
                        vm_page_unlock_queues();
 
                        mutex_pause(0);
 
+                       PAGE_REPLACEMENT_ALLOWED(TRUE);
+
                        vm_page_lock_queues();
                        lck_mtx_lock(&vm_page_queue_free_lock);
 
@@ -3878,9 +5360,9 @@ did_consider:
                         */
                        free_available = vm_page_free_count - vm_page_free_reserved;
                        considered = 0;
-#if MACH_ASSERT
+
                        yielded++;
-#endif
+
                        goto retry;
                }
                considered++;
@@ -3931,42 +5413,36 @@ did_consider:
                        m1 = &vm_pages[start_idx++];
 
 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
-                       assert(m1->free);
+                       assert(m1->vm_page_q_state == VM_PAGE_ON_FREE_Q);
 #endif
 
-                       if (m1->free) {
+                       if (m1->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
                                unsigned int color;
 
-                               color = m1->phys_page & vm_color_mask;
+                               color = VM_PAGE_GET_COLOR(m1);
 #if MACH_ASSERT
-                               vm_page_verify_free_list(&vm_page_queue_free[color], color, m1, TRUE);
+                               vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
 #endif
-                               queue_remove(&vm_page_queue_free[color],
-                                            m1,
-                                            vm_page_t,
-                                            pageq);
-                               m1->pageq.next = NULL;
-                               m1->pageq.prev = NULL;
+                               vm_page_queue_remove(&vm_page_queue_free[color].qhead,
+                                                    m1,
+                                                    vm_page_t,
+                                                    pageq);
+
+                               VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
 #if MACH_ASSERT
-                               vm_page_verify_free_list(&vm_page_queue_free[color], color, VM_PAGE_NULL, FALSE);
+                               vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
 #endif
                                /*
                                 * Clear the "free" bit so that this page
                                 * does not get considered for another
                                 * concurrent physically-contiguous allocation.
                                 */
-                               m1->free = FALSE; 
+                               m1->vm_page_q_state = VM_PAGE_NOT_ON_Q;
                                assert(m1->busy);
 
                                vm_page_free_count--;
                        }
                }
-               /*
-                * adjust global freelist counts
-                */
-               if (vm_page_free_count < vm_page_free_count_minimum)
-                       vm_page_free_count_minimum = vm_page_free_count;
-
                if( flags & KMA_LOMEM)
                        vm_page_lomem_find_contiguous_last_idx = page_idx;
                else 
@@ -3991,23 +5467,27 @@ did_consider:
                         */
                        m1 = &vm_pages[cur_idx--];
 
-                       assert(!m1->free);
-                       if (m1->object == VM_OBJECT_NULL) {
+                       if (m1->vm_page_object == 0) {
                                /*
                                 * page has already been removed from
                                 * the free list in the 1st pass
                                 */
+                               assert(m1->vm_page_q_state == VM_PAGE_NOT_ON_Q);
                                assert(m1->offset == (vm_object_offset_t) -1);
                                assert(m1->busy);
                                assert(!m1->wanted);
                                assert(!m1->laundry);
                        } else {
                                vm_object_t object;
+                               int refmod;
+                               boolean_t disconnected, reusable;
 
                                if (abort_run == TRUE)
                                        continue;
 
-                               object = m1->object;
+                               assert(m1->vm_page_q_state != VM_PAGE_NOT_ON_Q);
+
+                               object = VM_PAGE_OBJECT(m1);
 
                                if (object != locked_object) {
                                        if (locked_object) {
@@ -4019,9 +5499,9 @@ did_consider:
                                }
                                if (locked_object == VM_OBJECT_NULL || 
                                    (VM_PAGE_WIRED(m1) || m1->gobbled ||
-                                    m1->encrypted || m1->encrypted_cleaning || m1->cs_validated || m1->cs_tainted ||
-                                    m1->error || m1->absent || m1->pageout_queue || m1->laundry || m1->wanted || m1->precious ||
-                                    m1->cleaning || m1->overwriting || m1->restart || m1->unusual || m1->busy)) {
+                                    m1->laundry || m1->wanted ||
+                                    m1->cleaning || m1->overwriting || m1->free_when_done || m1->busy) ||
+                                   (m1->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
 
                                        if (locked_object) {
                                                vm_object_unlock(locked_object);
@@ -4031,8 +5511,31 @@ did_consider:
                                        abort_run = TRUE;
                                        continue;
                                }
-                               if (m1->pmapped || m1->dirty) {
-                                       int refmod;
+
+                               disconnected = FALSE;
+                               reusable = FALSE;
+
+                               if ((m1->reusable ||
+                                    object->all_reusable) &&
+                                   (m1->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
+                                   !m1->dirty &&
+                                   !m1->reference) {
+                                       /* reusable page... */
+                                       refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
+                                       disconnected = TRUE;
+                                       if (refmod == 0) {
+                                               /*
+                                                * ... not reused: can steal
+                                                * without relocating contents.
+                                                */
+                                               reusable = TRUE;
+                                       }
+                               }
+
+                               if ((m1->pmapped &&
+                                    ! reusable) ||
+                                   m1->dirty ||
+                                   m1->precious) {
                                        vm_object_offset_t offset;
 
                                        m2 = vm_page_grab();
@@ -4046,30 +5549,67 @@ did_consider:
                                                abort_run = TRUE;
                                                continue;
                                        }
-                                       if (m1->pmapped)
-                                               refmod = pmap_disconnect(m1->phys_page);
-                                       else
-                                               refmod = 0;
-                                       vm_page_copy(m1, m2);
-                 
-                                       m2->reference = m1->reference;
-                                       m2->dirty     = m1->dirty;
-
-                                       if (refmod & VM_MEM_REFERENCED)
-                                               m2->reference = TRUE;
-                                       if (refmod & VM_MEM_MODIFIED) {
-                                               SET_PAGE_DIRTY(m2, TRUE);
+                                       if (! disconnected) {
+                                               if (m1->pmapped)
+                                                       refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
+                                               else
+                                                       refmod = 0;
                                        }
-                                       offset = m1->offset;
+
+                                       /* copy the page's contents */
+                                       pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
+                                       /* copy the page's state */
+                                       assert(!VM_PAGE_WIRED(m1));
+                                       assert(m1->vm_page_q_state != VM_PAGE_ON_FREE_Q);
+                                       assert(m1->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q);
+                                       assert(!m1->laundry);
+                                       m2->reference   = m1->reference;
+                                       assert(!m1->gobbled);
+                                       assert(!m1->private);
+                                       m2->no_cache    = m1->no_cache;
+                                       m2->xpmapped    = 0;
+                                       assert(!m1->busy);
+                                       assert(!m1->wanted);
+                                       assert(!m1->fictitious);
+                                       m2->pmapped     = m1->pmapped; /* should flush cache ? */
+                                       m2->wpmapped    = m1->wpmapped;
+                                       assert(!m1->free_when_done);
+                                       m2->absent      = m1->absent;
+                                       m2->error       = m1->error;
+                                       m2->dirty       = m1->dirty;
+                                       assert(!m1->cleaning);
+                                       m2->precious    = m1->precious;
+                                       m2->clustered   = m1->clustered;
+                                       assert(!m1->overwriting);
+                                       m2->restart     = m1->restart;
+                                       m2->unusual     = m1->unusual;
+                                       m2->cs_validated = m1->cs_validated;
+                                       m2->cs_tainted  = m1->cs_tainted;
+                                       m2->cs_nx       = m1->cs_nx;
 
                                        /*
-                                        * completely cleans up the state
-                                        * of the page so that it is ready
-                                        * to be put onto the free list, or
-                                        * for this purpose it looks like it
-                                        * just came off of the free list
+                                        * If m1 had really been reusable,
+                                        * we would have just stolen it, so
+                                        * let's not propagate its "reusable"
+                                        * bit and assert that m2 is not
+                                        * marked as "reusable".
                                         */
-                                       vm_page_free_prepare(m1);
+                                       // m2->reusable = m1->reusable;
+                                       assert(!m2->reusable);
+
+                                       // assert(!m1->lopage);
+                                       m2->slid        = m1->slid;
+
+                                       if (m1->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
+                                               m2->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR;
+
+                                       /*
+                                        * page may need to be flushed if
+                                        * it is marshalled into a UPL
+                                        * that is going to be used by a device
+                                        * that doesn't support coherency
+                                        */
+                                       m2->written_by_kernel = TRUE;
 
                                        /*
                                         * make sure we clear the ref/mod state
@@ -4077,20 +5617,52 @@ did_consider:
                                         * inheriting state from the last time
                                         * this page was used...
                                         */
-                                       pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+                                       pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+
+                                       if (refmod & VM_MEM_REFERENCED)
+                                               m2->reference = TRUE;
+                                       if (refmod & VM_MEM_MODIFIED) {
+                                               SET_PAGE_DIRTY(m2, TRUE);
+                                       }
+                                       offset = m1->offset;
+
+                                       /*
+                                        * completely cleans up the state
+                                        * of the page so that it is ready
+                                        * to be put onto the free list, or
+                                        * for this purpose it looks like it
+                                        * just came off of the free list
+                                        */
+                                       vm_page_free_prepare(m1);
+
                                        /*
-                                        * now put the substitute page on the object
+                                        * now put the substitute page
+                                        * on the object
                                         */
-                                       vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE, FALSE);
+                                       vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
 
-                                       if (m2->reference)
-                                               vm_page_activate(m2);
-                                       else
-                                               vm_page_deactivate(m2);
+                                       if (m2->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+                                               m2->pmapped = TRUE;
+                                               m2->wpmapped = TRUE;
 
+                                               PMAP_ENTER(kernel_pmap, m2->offset, m2,
+                                                          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr);
+
+                                               assert(kr == KERN_SUCCESS);
+
+                                               compressed_pages++;
+
+                                       } else {
+                                               if (m2->reference)
+                                                       vm_page_activate(m2);
+                                               else
+                                                       vm_page_deactivate(m2);
+                                       }
                                        PAGE_WAKEUP_DONE(m2);
 
                                } else {
+                                       assert(m1->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
+
                                        /*
                                         * completely cleans up the state
                                         * of the page so that it is ready
@@ -4100,12 +5672,15 @@ did_consider:
                                         */
                                        vm_page_free_prepare(m1);
                                }
-#if MACH_ASSERT
+
                                stolen_pages++;
-#endif
+
                        }
-                       m1->pageq.next = (queue_entry_t) m;
-                       m1->pageq.prev = NULL;
+#if CONFIG_BACKGROUND_QUEUE
+                       vm_page_assign_background_state(m1);
+#endif
+                       VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
+                       m1->snext = m;
                        m = m1;
                }
                if (locked_object) {
@@ -4114,12 +5689,6 @@ did_consider:
                }
 
                if (abort_run == TRUE) {
-                       if (m != VM_PAGE_NULL) {
-                               vm_page_free_list(m, FALSE);
-                       }
-#if MACH_ASSERT
-                       dumped_run++;
-#endif
                        /*
                         * want the index of the last
                         * page in this run that was
@@ -4129,8 +5698,16 @@ did_consider:
                         */
                        page_idx = tmp_start_idx + 2;
                        if (page_idx >= vm_pages_count) {
-                               if (wrapped)
+                               if (wrapped) {
+                                       if (m != VM_PAGE_NULL) {
+                                               vm_page_unlock_queues();
+                                               vm_page_free_list(m, FALSE);
+                                               vm_page_lock_queues();
+                                               m = VM_PAGE_NULL;
+                                       }
+                                       dumped_run++;
                                        goto done_scanning;
+                               }
                                page_idx = last_idx = 0;
                                wrapped = TRUE;
                        }
@@ -4150,6 +5727,14 @@ did_consider:
                        
                        last_idx = page_idx;
                        
+                       if (m != VM_PAGE_NULL) {
+                               vm_page_unlock_queues();
+                               vm_page_free_list(m, FALSE);
+                               vm_page_lock_queues();
+                               m = VM_PAGE_NULL;
+                       }
+                       dumped_run++;
+
                        lck_mtx_lock(&vm_page_queue_free_lock);
                        /*
                        * reset our free page limit since we
@@ -4161,9 +5746,13 @@ did_consider:
 
                for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
 
-                       if (wire == TRUE)
+                       assert(m1->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+                       assert(m1->wire_count == 0);
+
+                       if (wire == TRUE) {
                                m1->wire_count++;
-                       else
+                               m1->vm_page_q_state = VM_PAGE_IS_WIRED;
+                       } else
                                m1->gobbled = TRUE;
                }
                if (wire == FALSE)
@@ -4177,6 +5766,8 @@ did_consider:
                assert(vm_page_verify_contiguous(m, npages));
        }
 done_scanning:
+       PAGE_REPLACEMENT_ALLOWED(FALSE);
+
        vm_page_unlock_queues();
 
 #if DEBUG
@@ -4193,16 +5784,33 @@ done_scanning:
                tv_end_sec -= 1000000;
        }
        if (vm_page_find_contig_debug) {
-               printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d... scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages\n",
-              __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
-              (long)tv_end_sec, tv_end_usec, orig_last_idx,
-              scanned, yielded, dumped_run, stolen_pages);
+               printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
+                      __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+                      (long)tv_end_sec, tv_end_usec, orig_last_idx,
+                      scanned, yielded, dumped_run, stolen_pages, compressed_pages);
        }
 
 #endif
 #if MACH_ASSERT
        vm_page_verify_free_lists();
 #endif
+       if (m == NULL && zone_gc_called == FALSE) {
+               printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
+                      __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+                      scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
+
+               if (consider_buffer_cache_collect != NULL) {
+                       (void)(*consider_buffer_cache_collect)(1);
+               }
+
+               consider_zone_gc(FALSE);
+
+               zone_gc_called = TRUE;
+
+               printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
+               goto full_scan_again;
+       }
+
        return m;
 }
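
Stripped of its locking, page stealing, and retry logic, the first pass of vm_page_find_contiguous is a linear scan for a run of consecutive, alignment-constrained physical frame numbers, resetting the run state whenever contiguity breaks. A compact stand-alone sketch of that scan, with invented data:

#include <stdio.h>

static unsigned pfn[] = { 8, 9, 12, 13, 14, 15, 20 };
#define NPAGES (sizeof(pfn) / sizeof(pfn[0]))

static int find_contiguous(unsigned want, unsigned pnum_mask)
{
    unsigned npages = 0, start_idx = 0, prevcont = 0;

    for (unsigned i = 0; i < NPAGES; i++) {
        if (npages == 0 && (pfn[i] & pnum_mask) != 0) {
            continue;                    /* first page must be aligned */
        } else if (npages && pfn[i] != prevcont + 1) {
            npages = 0;                  /* RESET_STATE_OF_RUN analog */
            if ((pfn[i] & pnum_mask) != 0)
                continue;
        }
        if (npages == 0)
            start_idx = i;
        prevcont = pfn[i];
        if (++npages == want)
            return (int)start_idx;       /* found a qualifying run */
    }
    return -1;
}

int main(void)
{
    /* a run of 4 consecutive frames starting on a 4-frame boundary */
    int idx = find_contiguous(4, 3);
    printf("run starts at index %d (pfn %u)\n",
           idx, idx < 0 ? 0 : pfn[idx]);
    return 0;
}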
 
@@ -4284,6 +5892,7 @@ unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
 void
 vm_page_do_delayed_work(
        vm_object_t     object,
+       vm_tag_t        tag,
        struct vm_page_delayed_work *dwp,
        int             dw_count)
 {
@@ -4324,31 +5933,34 @@ vm_page_do_delayed_work(
 
                if (dwp->dw_mask & DW_vm_pageout_throttle_up)
                        vm_pageout_throttle_up(m);
-
+#if CONFIG_PHANTOM_CACHE
+               if (dwp->dw_mask & DW_vm_phantom_cache_update)
+                       vm_phantom_cache_update(m);
+#endif
                if (dwp->dw_mask & DW_vm_page_wire)
-                       vm_page_wire(m);
+                       vm_page_wire(m, tag, FALSE);
                else if (dwp->dw_mask & DW_vm_page_unwire) {
                        boolean_t       queueit;
 
-                       queueit = (dwp->dw_mask & DW_vm_page_free) ? FALSE : TRUE;
+                       queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
 
                        vm_page_unwire(m, queueit);
                }
                if (dwp->dw_mask & DW_vm_page_free) {
                        vm_page_free_prepare_queues(m);
 
-                       assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+                       assert(m->pageq.next == 0 && m->pageq.prev == 0);
                        /*
                         * Add this page to our list of reclaimed pages,
                         * to be freed later.
                         */
-                       m->pageq.next = (queue_entry_t) local_free_q;
+                       m->snext = local_free_q;
                        local_free_q = m;
                } else {
                        if (dwp->dw_mask & DW_vm_page_deactivate_internal)
                                vm_page_deactivate_internal(m, FALSE);
                        else if (dwp->dw_mask & DW_vm_page_activate) {
-                               if (m->active == FALSE) {
+                               if (m->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q) {
                                        vm_page_activate(m);
                                }
                        }
@@ -4362,7 +5974,7 @@ vm_page_do_delayed_work(
                                 * cleaned queue, and so we would have a referenced (maybe even dirty)
                                 * page on that queue, which we don't want
                                 */
-                               int refmod_state = pmap_disconnect(m->phys_page);
+                               int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 
                                if ((refmod_state & VM_MEM_REFERENCED)) {
                                        /*
@@ -4373,7 +5985,7 @@ vm_page_do_delayed_work(
                                        vm_pageout_cleaned_reactivated++;
                                        vm_pageout_cleaned_commit_reactivated++;
 
-                                       if (m->active == FALSE)
+                                       if (m->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q)
                                                vm_page_activate(m);
                                } else {
                                        m->reference = FALSE;
@@ -4383,8 +5995,8 @@ vm_page_do_delayed_work(
                        else if (dwp->dw_mask & DW_vm_page_lru)
                                vm_page_lru(m);
                        else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
-                               if ( !m->pageout_queue)
-                                       VM_PAGE_QUEUES_REMOVE(m);
+                               if (m->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q)
+                                       vm_page_queues_remove(m, TRUE);
                        }
                        if (dwp->dw_mask & DW_set_reference)
                                m->reference = TRUE;
@@ -4392,12 +6004,12 @@ vm_page_do_delayed_work(
                                m->reference = FALSE;
 
                        if (dwp->dw_mask & DW_move_page) {
-                               if ( !m->pageout_queue) {
-                                       VM_PAGE_QUEUES_REMOVE(m);
+                               if (m->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q) {
+                                       vm_page_queues_remove(m, FALSE);
 
-                                       assert(m->object != kernel_object);
+                                       assert(VM_PAGE_OBJECT(m) != kernel_object);
 
-                                       VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+                                       vm_page_enqueue_inactive(m, FALSE);
                                }
                        }
                        if (dwp->dw_mask & DW_clear_busy)
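
The hunk above is the consumer half of a batching pattern: callers accumulate a DW_* bitmask per page while other locks are held, and vm_page_do_delayed_work later applies all of the deferred operations under a single acquisition of the page-queues lock. A minimal sketch of the shape of that pattern; the flag names here are invented:

#include <stdio.h>

#define DW_WIRE      0x1
#define DW_FREE      0x2
#define DW_ACTIVATE  0x4

struct delayed_work {
    int page_id;        /* stand-in for a vm_page_t */
    unsigned dw_mask;   /* operations to apply later */
};

static void do_delayed_work(struct delayed_work *dwp, int dw_count)
{
    /* in the kernel: the queues lock is taken once for the whole batch */
    for (int i = 0; i < dw_count; i++, dwp++) {
        if (dwp->dw_mask & DW_WIRE)
            printf("page %d: wire\n", dwp->page_id);
        if (dwp->dw_mask & DW_FREE)
            printf("page %d: move to local free list\n", dwp->page_id);
        else if (dwp->dw_mask & DW_ACTIVATE)
            printf("page %d: activate\n", dwp->page_id);
    }
    /* unlock, then free the locally collected pages in one shot */
}

int main(void)
{
    struct delayed_work batch[] = {
        { 1, DW_WIRE },
        { 2, DW_FREE },
        { 3, DW_ACTIVATE },
    };
    do_delayed_work(batch, 3);
    return 0;
}
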
@@ -4441,7 +6053,7 @@ vm_page_alloc_list(
 
                        return (KERN_RESOURCE_SHORTAGE);
                }
-               mem->pageq.next = (queue_entry_t) lo_page_list;
+               mem->snext = lo_page_list;
                lo_page_list = mem;
        }
        *list = lo_page_list;
@@ -4458,7 +6070,7 @@ vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
 vm_page_t
 vm_page_get_next(vm_page_t page)
 {
-       return ((vm_page_t) page->pageq.next);
+       return (page->snext);
 }
 
 vm_object_offset_t
@@ -4470,7 +6082,7 @@ vm_page_get_offset(vm_page_t page)
 ppnum_t
 vm_page_get_phys_page(vm_page_t page)
 {
-       return (page->phys_page);
+       return (VM_PAGE_GET_PHYS_PAGE(page));
 }
        
        
@@ -4480,16 +6092,20 @@ vm_page_get_phys_page(vm_page_t page)
 
 static vm_page_t hibernate_gobble_queue;
 
-extern boolean_t (* volatile consider_buffer_cache_collect)(int);
-
 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
-static int  hibernate_flush_dirty_pages(void);
-static int  hibernate_flush_queue(queue_head_t *, int);
+static int  hibernate_flush_dirty_pages(int);
+static int  hibernate_flush_queue(vm_page_queue_head_t *, int);
 
 void hibernate_flush_wait(void);
 void hibernate_mark_in_progress(void);
 void hibernate_clear_in_progress(void);
 
+void           hibernate_free_range(int, int);
+void           hibernate_hash_insert_page(vm_page_t);
+uint32_t       hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
+void           hibernate_rebuild_vm_structs(void);
+uint32_t       hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
+ppnum_t                hibernate_lookup_paddr(unsigned int);
 
 struct hibernate_statistics {
        int hibernate_considered;
@@ -4498,6 +6114,7 @@ struct hibernate_statistics {
        int hibernate_skipped_cleaning;
        int hibernate_skipped_transient;
        int hibernate_skipped_precious;
+       int hibernate_skipped_external;
        int hibernate_queue_nolock;
        int hibernate_queue_paused;
        int hibernate_throttled;
@@ -4512,15 +6129,25 @@ struct hibernate_statistics {
        int cd_found_cleaning;
        int cd_found_laundry;
        int cd_found_dirty;
+       int cd_found_xpmapped;
+       int cd_skipped_xpmapped;
        int cd_local_free;
        int cd_total_free;
        int cd_vm_page_wire_count;
+       int cd_vm_struct_pages_unneeded;
        int cd_pages;
        int cd_discarded;
        int cd_count_wire;
 } hibernate_stats;
 
 
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define        HIBERNATE_XPMAPPED_LIMIT        40000
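
A cap like this is typically applied as a counter check during the sweep, accepting pages until the limit and counting the rest as skipped (cf. the cd_found_xpmapped / cd_skipped_xpmapped fields above); the actual discard logic is not part of this hunk. A trivial stand-alone sketch of that shape:

#include <stdio.h>

#define XPMAPPED_LIMIT 40000

int main(void)
{
    unsigned found = 0, skipped = 0;

    for (unsigned page = 0; page < 50000; page++) {
        if (found < XPMAPPED_LIMIT)
            found++;        /* goes into the hibernation image */
        else
            skipped++;      /* left out to respect the size estimate */
    }
    printf("found %u, skipped %u\n", found, skipped);
    return 0;
}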
+
 
 static int
 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
@@ -4529,7 +6156,7 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
 
        vm_page_lock_queues();
 
-       while (q->pgo_laundry) {
+       while ( !vm_page_queue_empty(&q->pgo_pending) ) {
 
                q->pgo_draining = TRUE;
 
@@ -4539,8 +6166,12 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
 
                wait_result = thread_block(THREAD_CONTINUE_NULL);
 
-               if (wait_result == THREAD_TIMED_OUT) {
+               if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
                        hibernate_stats.hibernate_drain_timeout++;
+                       
+                       if (q == &vm_pageout_queue_external)
+                               return (0);
+                       
                        return (1);
                }
                vm_page_lock_queues();
@@ -4553,8 +6184,10 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
 }
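
The drain loop above is a timed-wait pattern: block until the pageout thread signals progress, and give up if a wait times out while work is still pending. A user-space sketch with POSIX condition variables; the timeout value and variable names are illustrative, and a real producer would decrement `pending` and signal `drained`:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int pending = 3;              /* stand-in for q->pgo_laundry */

static int drain_queue(void)
{
    pthread_mutex_lock(&lock);
    while (pending > 0) {
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 5;        /* periodic wait, as above */

        int rc = pthread_cond_timedwait(&drained, &lock, &deadline);
        if (rc != 0 && pending > 0) {
            pthread_mutex_unlock(&lock);
            return 1;                /* timed out with work remaining */
        }
    }
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    pending = 0;                     /* pretend the queue already drained */
    printf("drain returned %d\n", drain_queue());
    return 0;
}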
 
 
+boolean_t hibernate_skip_external = FALSE;
+
 static int
-hibernate_flush_queue(queue_head_t *q, int qcount)
+hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
 {
        vm_page_t       m;
        vm_object_t     l_object = NULL;
@@ -4567,16 +6200,15 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
        struct  vm_pageout_queue *eq;
        struct  vm_pageout_queue *tq;
 
-       hibernate_cleaning_in_progress = TRUE;
+       KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
+                       VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
 
-       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, q, qcount, 0, 0, 0);
-       
        iq = &vm_pageout_queue_internal;
        eq = &vm_pageout_queue_external;
 
        vm_page_lock_queues();
 
-       while (qcount && !queue_empty(q)) {
+       while (qcount && !vm_page_queue_empty(q)) {
 
                if (current_run++ == 1000) {
                        if (hibernate_should_abort()) {
@@ -4586,8 +6218,8 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
                        current_run = 0;
                }
 
-               m = (vm_page_t) queue_first(q);
-               m_object = m->object;
+               m = (vm_page_t) vm_page_queue_first(q);
+               m_object = VM_PAGE_OBJECT(m);
 
                /*
                 * check to see if we currently are working
@@ -4618,7 +6250,6 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
                                        goto reenter_pg_on_q;
                                }
-                               vm_pageout_scan_wants_object = m_object;
 
                                vm_page_unlock_queues();
                                mutex_pause(try_failed_count++);
@@ -4628,10 +6259,9 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
                                continue;
                        } else {
                                l_object = m_object;
-                               vm_pageout_scan_wants_object = VM_OBJECT_NULL;
                        }
                }
-               if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
+               if ( !m_object->alive || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
                        /*
                         * page is not to be cleaned
                         * put it back on the head of its queue
@@ -4643,9 +6273,6 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
                        goto reenter_pg_on_q;
                }
-               if ( !m_object->pager_initialized && m_object->pager_created)
-                       goto reenter_pg_on_q;
-                       
                if (m_object->copy == VM_OBJECT_NULL) {
                        if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
                                /*
@@ -4656,7 +6283,7 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
                        }
                }
                if ( !m->dirty && m->pmapped) {
-                       refmod_state = pmap_get_refmod(m->phys_page);
+                       refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
 
                        if ((refmod_state & VM_MEM_MODIFIED)) {
                                SET_PAGE_DIRTY(m, FALSE);
@@ -4674,6 +6301,13 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
                        goto reenter_pg_on_q;
                }
+
+               if (hibernate_skip_external == TRUE && !m_object->internal) {
+
+                       hibernate_stats.hibernate_skipped_external++;
+                       
+                       goto reenter_pg_on_q;
+               }
                tq = NULL;
 
                if (m_object->internal) {
@@ -4690,12 +6324,11 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
                                vm_object_unlock(l_object);
                                l_object = NULL;
                        }
-                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
-
-                       tq->pgo_throttled = TRUE;
 
                        while (retval == 0) {
 
+                               tq->pgo_throttled = TRUE;
+
                                assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
 
                                vm_page_unlock_queues();
@@ -4704,14 +6337,22 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
                                vm_page_lock_queues();
 
+                               if (wait_result != THREAD_TIMED_OUT)
+                                       break;
+                                if (!VM_PAGE_Q_THROTTLED(tq))
+                                        break;
+
                                if (hibernate_should_abort())
                                        retval = 1;
 
-                               if (wait_result != THREAD_TIMED_OUT)
-                                       break;
-                               
                                if (--wait_count == 0) {
+
                                        hibernate_stats.hibernate_throttle_timeout++;
+
+                                       if (tq == eq) {
+                                               hibernate_skip_external = TRUE;
+                                               break;
+                                       }
                                        retval = 1;
                                }
                        }
@@ -4725,21 +6366,22 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
                /*
                 * we've already factored out pages in the laundry, which
                 * means this page can't be on the pageout queue, so it's
-                * safe to do the VM_PAGE_QUEUES_REMOVE
+                * safe to do the vm_page_queues_remove
                 */
-                assert(!m->pageout_queue);
+               vm_page_queues_remove(m, TRUE);
 
-               VM_PAGE_QUEUES_REMOVE(m);
+               if (m_object->internal == TRUE)
+                       pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
 
-               vm_pageout_cluster(m, FALSE);
+               vm_pageout_cluster(m);
 
                hibernate_stats.hibernate_found_dirty++;
 
                goto next_pg;
 
 reenter_pg_on_q:
-               queue_remove(q, m, vm_page_t, pageq);
-               queue_enter(q, m, vm_page_t, pageq);
+               vm_page_queue_remove(q, m, vm_page_t, pageq);
+               vm_page_queue_enter(q, m, vm_page_t, pageq);
 
                hibernate_stats.hibernate_reentered_on_q++;
 next_pg:
@@ -4752,26 +6394,21 @@ next_pg:
                vm_object_unlock(l_object);
                l_object = NULL;
        }
-       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
        vm_page_unlock_queues();
 
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
 
-       hibernate_cleaning_in_progress = FALSE;
-
        return (retval);
 }
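
The reenter_pg_on_q label above implements "move to tail": a page that cannot be cleaned right now is removed and re-entered at the end of the queue, so repeated passes over the queue head still make forward progress. A sketch of the same idiom with the BSD sys/queue.h macros:

#include <stdio.h>
#include <sys/queue.h>

struct page {
    int id;
    TAILQ_ENTRY(page) pageq;
};
TAILQ_HEAD(pagequeue, page);

static void reenter_on_q(struct pagequeue *q, struct page *m)
{
    TAILQ_REMOVE(q, m, pageq);          /* vm_page_queue_remove */
    TAILQ_INSERT_TAIL(q, m, pageq);     /* vm_page_queue_enter  */
}

int main(void)
{
    struct pagequeue q = TAILQ_HEAD_INITIALIZER(q);
    struct page a = { 1 }, b = { 2 };

    TAILQ_INSERT_TAIL(&q, &a, pageq);
    TAILQ_INSERT_TAIL(&q, &b, pageq);
    reenter_on_q(&q, &a);               /* a skips to the back */

    struct page *p;
    TAILQ_FOREACH(p, &q, pageq)
        printf("page %d\n", p->id);     /* prints 2 then 1 */
    return 0;
}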
 
 
 static int
-hibernate_flush_dirty_pages()
+hibernate_flush_dirty_pages(int pass)
 {
        struct vm_speculative_age_q     *aq;
        uint32_t        i;
 
-       bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
-
        if (vm_page_local_q) {
                for (i = 0; i < vm_page_local_q_count; i++)
                        vm_page_reactivate_local(i, TRUE, FALSE);
@@ -4783,13 +6420,13 @@ hibernate_flush_dirty_pages()
 
                aq = &vm_page_queue_speculative[i];
 
-               if (queue_empty(&aq->age_q))
+               if (vm_page_queue_empty(&aq->age_q))
                        continue;
                qcount = 0;
 
                vm_page_lockspin_queues();
 
-               queue_iterate(&aq->age_q,
+               vm_page_queue_iterate(&aq->age_q,
                              m,
                              vm_page_t,
                              pageq)
@@ -4803,57 +6440,97 @@ hibernate_flush_dirty_pages()
                                return (1);
                }
        }
-       if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count))
-               return (1);
        if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count))
                return (1);
+       /* XXX FBDP TODO: flush secluded queue */
        if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count))
                return (1);
        if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count))
                return (1);
-
        if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal))
                return (1);
-       return (hibernate_drain_pageout_queue(&vm_pageout_queue_external));
+
+       if (pass == 1)
+               vm_compressor_record_warmup_start();
+
+       if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
+               if (pass == 1)
+                       vm_compressor_record_warmup_end();
+               return (1);
+       }
+       if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
+               if (pass == 1)
+                       vm_compressor_record_warmup_end();
+               return (1);
+       }
+       if (pass == 1)
+               vm_compressor_record_warmup_end();
+
+       if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external))
+               return (1);
+
+       return (0);
 }
 
 
-extern void IOSleep(unsigned int);
-extern int sync_internal(void);
+void
+hibernate_reset_stats()
+{
+       bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
 
 int
 hibernate_flush_memory()
 {
        int     retval;
 
+       assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
 
-       IOSleep(2 * 1000);
+       hibernate_cleaning_in_progress = TRUE;
+       hibernate_skip_external = FALSE;
+
+       if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
+
+               KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
 
-       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_NONE, vm_page_free_count, 0, 0, 0, 0);
+               vm_compressor_flush();
+
+               KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
 
-       if ((retval = hibernate_flush_dirty_pages()) == 0) {
                if (consider_buffer_cache_collect != NULL) {
+                       unsigned int orig_wire_count;
 
-                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, vm_page_wire_count, 0, 0, 0, 0);
+                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+                       orig_wire_count = vm_page_wire_count;
                        
-                       sync_internal();
                        (void)(*consider_buffer_cache_collect)(1);
-                       consider_zone_gc(TRUE);
+                       consider_zone_gc(FALSE);
+
+                       HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
 
-                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, vm_page_wire_count, 0, 0, 0, 0);
+                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
                }
        }
+       hibernate_cleaning_in_progress = FALSE;
+
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
 
+       if (retval)
+               HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
+
+
     HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
                 hibernate_stats.hibernate_considered,
                 hibernate_stats.hibernate_reentered_on_q,
                 hibernate_stats.hibernate_found_dirty);
-    HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) queue_nolock(%d)\n",
+    HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
                 hibernate_stats.hibernate_skipped_cleaning,
                 hibernate_stats.hibernate_skipped_transient,
                 hibernate_stats.hibernate_skipped_precious,
+                hibernate_stats.hibernate_skipped_external,
                 hibernate_stats.hibernate_queue_nolock);
     HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
                 hibernate_stats.hibernate_queue_paused,
@@ -4887,38 +6564,6 @@ hibernate_page_list_zero(hibernate_page_list_t *list)
     }
 }
 
-void
-hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
-{
-    uint32_t i;
-    vm_page_t m;
-    uint64_t start, end, timeout, nsec;
-    clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
-    clock_get_uptime(&start);
-
-    for (i = 0; i < gobble_count; i++)
-    {
-       while (VM_PAGE_NULL == (m = vm_page_grab()))
-       {
-           clock_get_uptime(&end);
-           if (end >= timeout)
-               break;
-           VM_PAGE_WAIT();
-       }
-       if (!m)
-           break;
-       m->busy = FALSE;
-       vm_page_gobble(m);
-
-       m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
-       hibernate_gobble_queue = m;
-    }
-
-    clock_get_uptime(&end);
-    absolutetime_to_nanoseconds(end - start, &nsec);
-    HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
-}
-
 void
 hibernate_free_gobble_pages(void)
 {
@@ -4928,7 +6573,7 @@ hibernate_free_gobble_pages(void)
     m = (vm_page_t) hibernate_gobble_queue;
     while(m)
     {
-        next = (vm_page_t) m->pageq.next;
+        next = m->snext;
         vm_page_free(m);
         count++;
         m = next;
@@ -4940,7 +6585,7 @@ hibernate_free_gobble_pages(void)
 }
 
 static boolean_t 
-hibernate_consider_discard(vm_page_t m)
+hibernate_consider_discard(vm_page_t m, boolean_t preflight)
 {
     vm_object_t object = NULL;
     int                  refmod_state;
@@ -4951,45 +6596,46 @@ hibernate_consider_discard(vm_page_t m)
         if (m->private)
             panic("hibernate_consider_discard: private");
 
-        if (!vm_object_lock_try(m->object)) {
-           hibernate_stats.cd_lock_failed++;
+       object = VM_PAGE_OBJECT(m);
+
+        if (!vm_object_lock_try(object)) {
+           object = NULL;
+           if (!preflight) hibernate_stats.cd_lock_failed++;
             break;
        }
-        object = m->object;
-
        if (VM_PAGE_WIRED(m)) {
-           hibernate_stats.cd_found_wired++;
+           if (!preflight) hibernate_stats.cd_found_wired++;
             break;
        }
         if (m->precious) {
-           hibernate_stats.cd_found_precious++;
+           if (!preflight) hibernate_stats.cd_found_precious++;
             break;
        }
         if (m->busy || !object->alive) {
            /*
             *  Somebody is playing with this page.
             */
-           hibernate_stats.cd_found_busy++;
+           if (!preflight) hibernate_stats.cd_found_busy++;
             break;
        }
         if (m->absent || m->unusual || m->error) {
            /*
             * If it's unusual in any way, ignore it
             */
-           hibernate_stats.cd_found_unusual++;
+           if (!preflight) hibernate_stats.cd_found_unusual++;
             break;
        }
         if (m->cleaning) {
-           hibernate_stats.cd_found_cleaning++;
+           if (!preflight) hibernate_stats.cd_found_cleaning++;
             break;
        }
        if (m->laundry) {
-           hibernate_stats.cd_found_laundry++;
+           if (!preflight) hibernate_stats.cd_found_laundry++;
             break;
        }
         if (!m->dirty)
         {
-            refmod_state = pmap_get_refmod(m->phys_page);
+               refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
         
             if (refmod_state & VM_MEM_REFERENCED)
                 m->reference = TRUE;
@@ -5005,8 +6651,20 @@ hibernate_consider_discard(vm_page_t m)
                    || (VM_PURGABLE_VOLATILE == object->purgable)
                    || (VM_PURGABLE_EMPTY    == object->purgable);
 
-       if (discard == FALSE)
-           hibernate_stats.cd_found_dirty++;
+
+        if (discard == FALSE) {
+               if (!preflight)
+                       hibernate_stats.cd_found_dirty++;
+        } else if (m->xpmapped && m->reference && !object->internal) {
+               if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+                       if (!preflight)
+                               hibernate_stats.cd_found_xpmapped++;
+                       discard = FALSE;
+               } else {
+                       if (!preflight)
+                               hibernate_stats.cd_skipped_xpmapped++;
+               }
+        }
     }
     while (FALSE);
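
The checks above execute inside a single-pass do { ... } while (FALSE) block, so
each break is an early exit that still falls through to the common cleanup code
following the loop. A minimal user-space sketch of the idiom (the function and
its checks are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* single-pass block: every 'break' lands on the shared exit path */
    static bool check_value(int v)
    {
        bool ok = false;

        do {
            if (v < 0)
                break;              /* reject: negative */
            if (v > 100)
                break;              /* reject: out of range */
            ok = true;              /* all checks passed */
        } while (false);

        /* shared cleanup/reporting, reached on every exit */
        printf("value %d => %s\n", v, ok ? "accepted" : "rejected");
        return ok;
    }

    int main(void)
    {
        check_value(42);
        check_value(-1);
        return 0;
    }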
 
@@ -5020,24 +6678,27 @@ hibernate_consider_discard(vm_page_t m)
 static void
 hibernate_discard_page(vm_page_t m)
 {
+    vm_object_t        m_object;
+
     if (m->absent || m->unusual || m->error)
        /*
         * If it's unusual in any way, ignore
         */
         return;
 
-#if DEBUG
-    vm_object_t object = m->object;
-    if (!vm_object_lock_try(m->object))
+    m_object = VM_PAGE_OBJECT(m);
+
+#if MACH_ASSERT || DEBUG
+    if (!vm_object_lock_try(m_object))
        panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
 #else
     /* No need to lock page queue for token delete, hibernate_vm_unlock() 
        makes sure these locks are uncontended before sleep */
-#endif /* !DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
 
     if (m->pmapped == TRUE) 
     {
-        __unused int refmod_state = pmap_disconnect(m->phys_page);
+       __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
     }
 
     if (m->laundry)
@@ -5047,21 +6708,75 @@ hibernate_discard_page(vm_page_t m)
     if (m->fictitious)
         panic("hibernate_discard_page(%p) fictitious", m);
 
-    if (VM_PURGABLE_VOLATILE == m->object->purgable)
+    if (VM_PURGABLE_VOLATILE == m_object->purgable)
     {
        /* object should be on a queue */
-        assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL));
-        purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
+        assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
+        purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
         assert(old_queue);
-        vm_purgeable_token_delete_first(old_queue);
-        m->object->purgable = VM_PURGABLE_EMPTY;
+       if (m_object->purgeable_when_ripe) {
+               vm_purgeable_token_delete_first(old_queue);
+       }
+       vm_object_lock_assert_exclusive(m_object);
+        m_object->purgable = VM_PURGABLE_EMPTY;
+
+       /*
+        * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
+        * accounted in the "volatile" ledger, so no change here.
+        * We have to update vm_page_purgeable_count, though, since we're
+        * effectively purging this object.
+        */
+       unsigned int delta;
+       assert(m_object->resident_page_count >= m_object->wired_page_count);
+       delta = (m_object->resident_page_count - m_object->wired_page_count);
+       assert(vm_page_purgeable_count >= delta);
+       assert(delta > 0);
+       OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
     }
        
     vm_page_free(m);
 
-#if DEBUG
-    vm_object_unlock(object);
-#endif /* DEBUG */
+#if MACH_ASSERT || DEBUG
+    vm_object_unlock(m_object);
+#endif /* MACH_ASSERT || DEBUG */
+}
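
When a volatile purgeable object is emptied above, the global purgeable-page
count drops by the object's pageable residency (resident_page_count minus
wired_page_count) in one atomic step. A small sketch of that accounting using a
C11 atomic as a stand-in for OSAddAtomic (the counter and helper are
hypothetical):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* hypothetical stand-in for vm_page_purgeable_count */
    static atomic_uint purgeable_count = 10;

    /* purging removes the object's non-wired resident pages from the count */
    static void account_purge(unsigned resident, unsigned wired)
    {
        assert(resident >= wired);
        atomic_fetch_sub(&purgeable_count, resident - wired);
    }

    int main(void)
    {
        account_purge(7, 3);    /* 4 pageable pages purged */
        printf("purgeable_count = %u\n", (unsigned)purgeable_count);  /* 6 */
        return 0;
    }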
+
+/*
+ Grab locks for hibernate_page_list_setall()
+*/
+void
+hibernate_vm_lock_queues(void)
+{
+    vm_object_lock(compressor_object);
+    vm_page_lock_queues();
+    lck_mtx_lock(&vm_page_queue_free_lock);
+    lck_mtx_lock(&vm_purgeable_queue_lock);
+
+    if (vm_page_local_q) {
+       uint32_t  i;
+       for (i = 0; i < vm_page_local_q_count; i++) {
+           struct vpl  *lq;
+           lq = &vm_page_local_q[i].vpl_un.vpl;
+           VPL_LOCK(&lq->vpl_lock);
+       }
+    }
+}
+
+void
+hibernate_vm_unlock_queues(void)
+{
+    if (vm_page_local_q) {
+       uint32_t  i;
+       for (i = 0; i < vm_page_local_q_count; i++) {
+           struct vpl  *lq;
+           lq = &vm_page_local_q[i].vpl_un.vpl;
+           VPL_UNLOCK(&lq->vpl_lock);
+       }
+    }
+    lck_mtx_unlock(&vm_purgeable_queue_lock);
+    lck_mtx_unlock(&vm_page_queue_free_lock);
+    vm_page_unlock_queues();
+    vm_object_unlock(compressor_object);
 }
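
Note the discipline in the pair above: hibernate_vm_lock_queues() acquires the
locks in one fixed order (compressor object, page queues, free lock, purgeable
lock, then the per-CPU local locks) and hibernate_vm_unlock_queues() releases
them in the reverse order. Taking locks in a single global order is the
standard way to rule out deadlock; a minimal pthread sketch of the same pattern
(all names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;  /* object    */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;  /* queues    */
    static pthread_mutex_t lock_c = PTHREAD_MUTEX_INITIALIZER;  /* free      */
    static pthread_mutex_t lock_d = PTHREAD_MUTEX_INITIALIZER;  /* purgeable */

    /* every path acquires in the same a -> b -> c -> d order... */
    static void lock_all(void)
    {
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        pthread_mutex_lock(&lock_c);
        pthread_mutex_lock(&lock_d);
    }

    /* ...and releases in the reverse d -> c -> b -> a order */
    static void unlock_all(void)
    {
        pthread_mutex_unlock(&lock_d);
        pthread_mutex_unlock(&lock_c);
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
    }

    int main(void)
    {
        lock_all();
        printf("critical section under all four locks\n");
        unlock_all();
        return 0;
    }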
 
 /*
@@ -5074,12 +6789,15 @@ void
 hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           hibernate_page_list_t * page_list_pal,
+                          boolean_t preflight, 
+                          boolean_t will_discard,
                           uint32_t * pagesOut)
 {
     uint64_t start, end, nsec;
     vm_page_t m;
+    vm_page_t next;
     uint32_t pages = page_list->page_count;
-    uint32_t count_zf = 0, count_throttled = 0;
+    uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
     uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
     uint32_t count_wire = pages;
     uint32_t count_discard_active    = 0;
@@ -5087,15 +6805,29 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
     uint32_t count_discard_cleaned   = 0;
     uint32_t count_discard_purgeable = 0;
     uint32_t count_discard_speculative = 0;
+    uint32_t count_discard_vm_struct_pages = 0;
     uint32_t i;
     uint32_t             bank;
     hibernate_bitmap_t * bitmap;
     hibernate_bitmap_t * bitmap_wired;
+    boolean_t                   discard_all;
+    boolean_t            discard;
+
+    HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
+
+    if (preflight) {
+        page_list       = NULL;
+        page_list_wired = NULL;
+        page_list_pal   = NULL;
+               discard_all     = FALSE;
+    } else {
+               discard_all     = will_discard;
+    }
 
-
-    HIBLOG("hibernate_page_list_setall start %p, %p\n", page_list, page_list_wired);
-
-#if DEBUG
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+        assert(hibernate_vm_locks_are_safe());
         vm_page_lock_queues();
        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
@@ -5104,45 +6836,58 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
                VPL_LOCK(&lq->vpl_lock);
            }
        }
-#endif /* DEBUG */
+    }
+#endif  /* MACH_ASSERT || DEBUG */
 
 
     KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
 
     clock_get_uptime(&start);
 
-    hibernate_page_list_zero(page_list);
-    hibernate_page_list_zero(page_list_wired);
-    hibernate_page_list_zero(page_list_pal);
-
-    hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
-    hibernate_stats.cd_pages = pages;
+    if (!preflight) {
+       hibernate_page_list_zero(page_list);
+       hibernate_page_list_zero(page_list_wired);
+       hibernate_page_list_zero(page_list_pal);
+    
+       hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
+       hibernate_stats.cd_pages = pages;
+    }
 
     if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++)
-                   vm_page_reactivate_local(i, TRUE, TRUE);
+                   vm_page_reactivate_local(i, TRUE, !preflight);
+    }
+
+    if (preflight) {
+       vm_object_lock(compressor_object);
+       vm_page_lock_queues();
+       lck_mtx_lock(&vm_page_queue_free_lock);
     }
 
     m = (vm_page_t) hibernate_gobble_queue;
-    while(m)
+    while (m)
     {
        pages--;
        count_wire--;
-       hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-       m = (vm_page_t) m->pageq.next;
+       if (!preflight) {
+           hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+           hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+       }
+       m = m->snext;
     }
 
-    for( i = 0; i < real_ncpus; i++ )
+    if (!preflight) for( i = 0; i < real_ncpus; i++ )
     {
        if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
        {
-           for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = (vm_page_t)m->pageq.next)
+       for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = m->snext)
            {
+               assert(m->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
+
                pages--;
                count_wire--;
-               hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-               hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+               hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+               hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
 
                hibernate_stats.cd_local_free++;
                hibernate_stats.cd_total_free++;
@@ -5152,188 +6897,271 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
 
     for( i = 0; i < vm_colors; i++ )
     {
-       queue_iterate(&vm_page_queue_free[i],
-                     m,
-                     vm_page_t,
-                     pageq)
+       vm_page_queue_iterate(&vm_page_queue_free[i].qhead,
+                             m,
+                             vm_page_t,
+                             pageq)
        {
+           assert(m->vm_page_q_state == VM_PAGE_ON_FREE_Q);
+
            pages--;
            count_wire--;
-           hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-
-           hibernate_stats.cd_total_free++;
+           if (!preflight) {
+               hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+               hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+    
+               hibernate_stats.cd_total_free++;
+           }
        }
     }
 
-    queue_iterate(&vm_lopage_queue_free,
-                 m,
-                 vm_page_t,
-                 pageq)
+    vm_page_queue_iterate(&vm_lopage_queue_free,
+                         m,
+                         vm_page_t,
+                         pageq)
     {
+       assert(m->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
+
        pages--;
        count_wire--;
-       hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-
-       hibernate_stats.cd_total_free++;
+       if (!preflight) {
+           hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+           hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+    
+           hibernate_stats.cd_total_free++;
+       }
     }
 
-    queue_iterate( &vm_page_queue_throttled,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
+    while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m))
     {
+       assert(m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q);
+
+        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+       discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
             count_discard_inactive++;
+            discard = discard_all;
         }
         else
             count_throttled++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+        if (discard) hibernate_discard_page(m);
+       m = next;
     }
 
-    queue_iterate( &vm_page_queue_anonymous,
-                    m,
-                    vm_page_t,
-                   pageq )
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+    while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
     {
+       assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+
+        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+       discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
+            discard = discard_all;
         }
         else
-            count_zf++;
+            count_anonymous++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+        if (discard)    hibernate_discard_page(m);
+       m = next;
     }
 
-    queue_iterate( &vm_page_queue_inactive,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+    while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
     {
+       assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+       discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
-               count_discard_inactive++;
+               count_discard_cleaned++;
+            discard = discard_all;
         }
         else
-            count_inactive++;
+            count_cleaned++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+        if (discard)    hibernate_discard_page(m);
+       m = next;
     }
 
-    queue_iterate( &vm_page_queue_cleaned,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+    while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
     {
-        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+       assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);
+
+        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
-               count_discard_cleaned++;
+               count_discard_active++;
+            discard = discard_all;
         }
         else
-            count_cleaned++;
+            count_active++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+        if (discard)    hibernate_discard_page(m);
+       m = next;
     }
 
-    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+    while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
     {
-       queue_iterate(&vm_page_queue_speculative[i].age_q,
-                     m,
-                     vm_page_t,
-                     pageq)
-       {
-           if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-            && hibernate_consider_discard(m))
-           {
-               hibernate_page_bitset(page_list, TRUE, m->phys_page);
-               count_discard_speculative++;
-           }
-           else
-               count_speculative++;
-           count_wire--;
-           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-       }
-    }
+       assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
 
-    queue_iterate( &vm_page_queue_active,
-                    m,
-                    vm_page_t,
-                    pageq )
-    {
-        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
-               count_discard_active++;
+               count_discard_inactive++;
+            discard = discard_all;
         }
         else
-            count_active++;
+            count_inactive++;
        count_wire--;
-       hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+        if (discard)    hibernate_discard_page(m);
+       m = next;
     }
+    /* XXX FBDP TODO: secluded queue */
+
+    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+    {
+       m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
+       while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
+       {
+           assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
 
-    // pull wired from hibernate_bitmap
+           next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+           discard = FALSE;
+           if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+            && hibernate_consider_discard(m, preflight))
+           {
+               if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+               count_discard_speculative++;
+               discard = discard_all;
+           }
+           else
+               count_speculative++;
+           count_wire--;
+           if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+           if (discard)    hibernate_discard_page(m);
+           m = next;
+       }
+    }
 
-    bitmap = &page_list->bank_bitmap[0];
-    bitmap_wired = &page_list_wired->bank_bitmap[0];
-    for (bank = 0; bank < page_list->bank_count; bank++)
+    vm_page_queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
     {
-       for (i = 0; i < bitmap->bitmapwords; i++)
-           bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
-       bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
-       bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+       assert(m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR);
+
+        count_compressor++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
     }
 
-    // machine dependent adjustments
-    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);
+    if (preflight == FALSE && discard_all == TRUE) {
+           KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
 
-    hibernate_stats.cd_count_wire = count_wire;
-    hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable + count_discard_speculative + count_discard_cleaned;
+           HIBLOG("hibernate_teardown started\n");
+           count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
+           HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
 
-    clock_get_uptime(&end);
-    absolutetime_to_nanoseconds(end - start, &nsec);
-    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
+           pages -= count_discard_vm_struct_pages;
+           count_wire -= count_discard_vm_struct_pages;
 
-    HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, could discard act %d inact %d purgeable %d spec %d cleaned %d\n", 
-               pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_zf, count_throttled,
-               count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+           hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
 
-    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+           KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
+    }
 
-#if DEBUG
-       if (vm_page_local_q) {
-           for (i = 0; i < vm_page_local_q_count; i++) {
+    if (!preflight) {
+       // pull wired from hibernate_bitmap
+       bitmap = &page_list->bank_bitmap[0];
+       bitmap_wired = &page_list_wired->bank_bitmap[0];
+       for (bank = 0; bank < page_list->bank_count; bank++)
+       {
+           for (i = 0; i < bitmap->bitmapwords; i++)
+               bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
+           bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
+           bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+       }
+    }
+
+    // machine dependent adjustments
+    hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
+
+    if (!preflight) {
+       hibernate_stats.cd_count_wire = count_wire;
+       hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
+               count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
+    }
+
+    clock_get_uptime(&end);
+    absolutetime_to_nanoseconds(end - start, &nsec);
+    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
+
+    HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d\n", 
+          pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
+               discard_all ? "did" : "could",
+               count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+
+    if (hibernate_stats.cd_skipped_xpmapped)
+           HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
+    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+    if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl      *lq;
                lq = &vm_page_local_q[i].vpl_un.vpl;
                VPL_UNLOCK(&lq->vpl_lock);
            }
        }
         vm_page_unlock_queues();
-#endif /* DEBUG */
+    }
+#endif  /* MACH_ASSERT || DEBUG */
+
+    if (preflight) {
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+       vm_page_unlock_queues();
+       vm_object_unlock(compressor_object);
+    }
 
     KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
 }
@@ -5351,7 +7179,8 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
     uint32_t  count_discard_cleaned   = 0;
     uint32_t  count_discard_speculative = 0;
 
-#if DEBUG
+
+#if MACH_ASSERT || DEBUG
         vm_page_lock_queues();
        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
@@ -5360,15 +7189,17 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
                VPL_LOCK(&lq->vpl_lock);
            }
        }
-#endif /* DEBUG */
+#endif  /* MACH_ASSERT || DEBUG */
 
     clock_get_uptime(&start);
 
-    m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
-    while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+    while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
     {
-        next = (vm_page_t) m->pageq.next;
-        if (hibernate_page_bittst(page_list, m->phys_page))
+       assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+
+        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
         {
            if (m->dirty)
                count_discard_purgeable++;
@@ -5381,11 +7212,13 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
 
     for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
     {
-       m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
-       while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+       m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
+       while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
        {
-           next = (vm_page_t) m->pageq.next;
-           if (hibernate_page_bittst(page_list, m->phys_page))
+          assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+           next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+           if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
            {
                count_discard_speculative++;
                hibernate_discard_page(m);
@@ -5394,11 +7227,13 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
        }
     }
 
-    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
-    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+    while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
     {
-        next = (vm_page_t) m->pageq.next;
-        if (hibernate_page_bittst(page_list, m->phys_page))
+       assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+
+        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
         {
            if (m->dirty)
                count_discard_purgeable++;
@@ -5408,12 +7243,15 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
         }
         m = next;
     }
+    /* XXX FBDP TODO: secluded queue */
 
-    m = (vm_page_t) queue_first(&vm_page_queue_active);
-    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+    while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
     {
-        next = (vm_page_t) m->pageq.next;
-        if (hibernate_page_bittst(page_list, m->phys_page))
+       assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);
+
+        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
         {
            if (m->dirty)
                count_discard_purgeable++;
@@ -5424,11 +7262,13 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
         m = next;
     }
 
-    m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
-    while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+    while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
     {
-        next = (vm_page_t) m->pageq.next;
-        if (hibernate_page_bittst(page_list, m->phys_page))
+       assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
         {
            if (m->dirty)
                count_discard_purgeable++;
@@ -5439,7 +7279,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
         m = next;
     }
 
-#if DEBUG
+#if MACH_ASSERT || DEBUG
        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl      *lq;
@@ -5448,7 +7288,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
            }
        }
         vm_page_unlock_queues();
-#endif /* DEBUG */
+#endif  /* MACH_ASSERT || DEBUG */
 
     clock_get_uptime(&end);
     absolutetime_to_nanoseconds(end - start, &nsec);
@@ -5457,6 +7297,373 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
                count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
 }
 
+boolean_t       hibernate_paddr_map_inited = FALSE;
+unsigned int   hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t      hibernate_rebuild_hash_list = NULL;
+
+unsigned int   hibernate_teardown_found_tabled_pages = 0;
+unsigned int   hibernate_teardown_found_created_pages = 0;
+unsigned int   hibernate_teardown_found_free_pages = 0;
+unsigned int   hibernate_teardown_vm_page_free_count;
+
+
+struct ppnum_mapping {
+       struct ppnum_mapping    *ppnm_next;
+       ppnum_t                 ppnm_base_paddr;
+       unsigned int            ppnm_sindx;
+       unsigned int            ppnm_eindx;
+};
+
+struct ppnum_mapping   *ppnm_head;
+struct ppnum_mapping   *ppnm_last_found = NULL;
+
+
+void
+hibernate_create_paddr_map() 
+{
+       unsigned int    i;
+       ppnum_t         next_ppnum_in_run = 0;
+       struct ppnum_mapping *ppnm = NULL;
+
+       if (hibernate_paddr_map_inited == FALSE) {
+
+               for (i = 0; i < vm_pages_count; i++) {
+
+                       if (ppnm)
+                               ppnm->ppnm_eindx = i;
+
+                       if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
+
+                               ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+                               ppnm->ppnm_next = ppnm_head;
+                               ppnm_head = ppnm;
+
+                               ppnm->ppnm_sindx = i;
+                               ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
+                       }
+                       next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
+               }
+               ppnm->ppnm_eindx++;
+
+               hibernate_paddr_map_inited = TRUE;
+       }
+}
+
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+       struct ppnum_mapping *ppnm = NULL;
+       
+       ppnm = ppnm_last_found;
+
+       if (ppnm) {
+               if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+                       goto done;
+       }
+       for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+               if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+                       ppnm_last_found = ppnm;
+                       break;
+               }
+       }
+       if (ppnm == NULL)
+               panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+       return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
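
hibernate_create_paddr_map() records each run of physically contiguous
vm_pages[] entries as a [sindx, eindx) interval plus the base physical page of
the run; hibernate_lookup_paddr() then recovers a physical page number from an
array index by locating the containing run and adding the offset into it. A
self-contained user-space sketch of that run-length translation (the types,
runs and values are made up for illustration):

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned int ppnum;

    typedef struct {
        ppnum        base_paddr;  /* physical page of the first index in the run */
        unsigned int sindx;       /* first vm_pages[] index covered              */
        unsigned int eindx;       /* one past the last index covered             */
    } run_t;

    /* two discontiguous runs: indices 0..3 -> 0x100..0x103,
       indices 4..9 -> 0x800..0x805 */
    static run_t runs[] = {
        { 0x100, 0,  4 },
        { 0x800, 4, 10 },
    };

    static ppnum lookup_paddr(unsigned int indx)
    {
        for (unsigned int i = 0; i < sizeof(runs) / sizeof(runs[0]); i++) {
            if (indx >= runs[i].sindx && indx < runs[i].eindx)
                return runs[i].base_paddr + (indx - runs[i].sindx);
        }
        assert(0 && "index not covered by any run");
        return 0;
    }

    int main(void)
    {
        printf("0x%x 0x%x\n", lookup_paddr(2), lookup_paddr(7));  /* 0x102 0x803 */
        return 0;
    }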
+
+
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+       addr64_t        saddr_aligned;
+       addr64_t        eaddr_aligned;
+       addr64_t        addr;
+       ppnum_t         paddr;
+       unsigned int    mark_as_unneeded_pages = 0;
+
+       saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+       eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+       for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+               paddr = pmap_find_phys(kernel_pmap, addr);
+
+               assert(paddr);
+
+               hibernate_page_bitset(page_list,       TRUE, paddr);
+               hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+               mark_as_unneeded_pages++;
+       }
+       return (mark_as_unneeded_pages);
+}
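
The alignment above is deliberately conservative: the start address is rounded
up to a page boundary and the end address rounded down, so only pages lying
entirely inside [saddr, eaddr) get marked as unneeded. A short worked sketch of
the arithmetic (addresses and the 4 KB page size are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_64 4096ULL
    #define PAGE_MASK_64 (PAGE_SIZE_64 - 1)

    int main(void)
    {
        uint64_t saddr = 0x1234, eaddr = 0x5ff0;
        uint64_t s = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;  /* 0x2000: round up   */
        uint64_t e = eaddr & ~PAGE_MASK_64;                   /* 0x5000: round down */

        printf("%llu fully contained pages\n",
               (unsigned long long)((e - s) / PAGE_SIZE_64)); /* 3 */
        return 0;
    }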
+
+
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+       vm_page_bucket_t *bucket;
+       int             hash_id;
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
+
+       assert(mem->hashed);
+       assert(m_object);
+       assert(mem->offset != (vm_object_offset_t) -1);
+
+       /*
+        *      Insert it into the object_object/offset hash table
+        */
+       hash_id = vm_page_hash(m_object, mem->offset);
+       bucket = &vm_page_buckets[hash_id];
+
+       mem->next_m = bucket->page_list;
+       bucket->page_list = VM_PAGE_PACK_PTR(mem);
+}
+
+
+void
+hibernate_free_range(int sindx, int eindx)
+{
+       vm_page_t       mem;
+       unsigned int    color;
+
+       while (sindx < eindx) {
+               mem = &vm_pages[sindx];
+
+               vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+               mem->lopage = FALSE;
+               mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+               color = VM_PAGE_GET_COLOR(mem);
+#if defined(__x86_64__)
+               vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead,
+                                         mem,
+                                         vm_page_t,
+                                         pageq);
+#else
+               vm_page_queue_enter(&vm_page_queue_free[color].qhead,
+                                         mem,
+                                         vm_page_t,
+                                         pageq);
+#endif
+               vm_page_free_count++;
+
+               sindx++;
+       }
+}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
+void
+hibernate_rebuild_vm_structs(void)
+{
+       int             i, cindx, sindx, eindx;
+       vm_page_t       mem, tmem, mem_next;
+       AbsoluteTime    startTime, endTime;
+       uint64_t        nsec;
+
+       if (hibernate_rebuild_needed == FALSE)
+               return;
+
+       KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
+       HIBLOG("hibernate_rebuild started\n");
+
+       clock_get_uptime(&startTime);
+
+       hibernate_rebuild_pmap_structs();
+
+       bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
+       eindx = vm_pages_count;
+
+       /*
+        * Mark all the vm_pages[] that have not been initialized yet as being
+        * transient. This is needed to ensure that the buddy page search is correct.
+        * Without this, random data in these vm_pages[] can trip up the buddy search.
+        */
+       for (i = hibernate_teardown_last_valid_compact_indx+1; i < eindx; ++i) 
+               vm_pages[i].vm_page_q_state = VM_PAGE_NOT_ON_Q;
+
+       for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+               
+               mem = &vm_pages[cindx];
+               assert(mem->vm_page_q_state != VM_PAGE_ON_FREE_Q);
+               /*
+                * hibernate_teardown_vm_structs leaves the location where
+                * this vm_page_t must be located in "next_m".
+                */
+               tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
+               mem->next_m = VM_PAGE_PACK_PTR(NULL);
+
+               sindx = (int)(tmem - &vm_pages[0]);
+
+               if (mem != tmem) {
+                       /*
+                        * this vm_page_t was moved by hibernate_teardown_vm_structs,
+                        * so move it back to its real location
+                        */
+                       *tmem = *mem;
+                       mem = tmem;
+               }
+               if (mem->hashed)
+                       hibernate_hash_insert_page(mem);
+               /*
+                * the 'hole' between this vm_page_t and the previous
+                * vm_page_t we moved needs to be initialized as 
+                * a range of free vm_page_t's
+                */
+               hibernate_free_range(sindx + 1, eindx);
+
+               eindx = sindx;
+       }
+       if (sindx)
+               hibernate_free_range(0, sindx);
+
+       assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
+
+       /*
+        * process the list of vm_page_t's that were entered in the hash,
+        * but were not located in the vm_pages array... these are
+        * vm_page_t's that were created on the fly (i.e. fictitious)
+        */
+       for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
+               mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
+
+               mem->next_m = 0;
+               hibernate_hash_insert_page(mem);
+       }
+       hibernate_rebuild_hash_list = NULL;
+
+        clock_get_uptime(&endTime);
+        SUB_ABSOLUTETIME(&endTime, &startTime);
+        absolutetime_to_nanoseconds(endTime, &nsec);
+
+       HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
+
+       hibernate_rebuild_needed = FALSE;
+
+       KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
+}
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
+uint32_t
+hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+       unsigned int    i;
+       unsigned int    compact_target_indx;
+       vm_page_t       mem, mem_next;
+       vm_page_bucket_t *bucket;
+       unsigned int    mark_as_unneeded_pages = 0;
+       unsigned int    unneeded_vm_page_bucket_pages = 0;
+       unsigned int    unneeded_vm_pages_pages = 0;
+       unsigned int    unneeded_pmap_pages = 0;
+       addr64_t        start_of_unneeded = 0;
+       addr64_t        end_of_unneeded = 0;
+
+       
+       if (hibernate_should_abort())
+               return (0);
+
+       hibernate_rebuild_needed = TRUE;
+
+       HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
+              vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
+              vm_page_cleaned_count, compressor_object->resident_page_count);
+
+       for (i = 0; i < vm_page_bucket_count; i++) {
+
+               bucket = &vm_page_buckets[i];
+
+               for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
+                       assert(mem->hashed);
+
+                       mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
+
+                       if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
+                               mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
+                               hibernate_rebuild_hash_list = mem;
+                       }
+               }
+       }
+       unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
+       mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
+
+       hibernate_teardown_vm_page_free_count = vm_page_free_count;
+
+       compact_target_indx = 0;
+
+       for (i = 0; i < vm_pages_count; i++) {
+
+               mem = &vm_pages[i];
+
+               if (mem->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
+                       unsigned int color;
+
+                       assert(mem->busy);
+                       assert(!mem->lopage);
+
+                       color = VM_PAGE_GET_COLOR(mem);
+
+                       vm_page_queue_remove(&vm_page_queue_free[color].qhead,
+                                            mem,
+                                            vm_page_t,
+                                            pageq);
+
+                       VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+
+                       vm_page_free_count--;
+
+                       hibernate_teardown_found_free_pages++;
+
+                       if (vm_pages[compact_target_indx].vm_page_q_state != VM_PAGE_ON_FREE_Q)
+                               compact_target_indx = i;
+               } else {
+                       /*
+                        * record this vm_page_t's original location;
+                        * we need this even if it doesn't get moved,
+                        * as an indicator to the rebuild function that
+                        * it doesn't need to be moved
+                        */
+                       mem->next_m = VM_PAGE_PACK_PTR(mem);
+
+                       if (vm_pages[compact_target_indx].vm_page_q_state == VM_PAGE_ON_FREE_Q) {
+                               /*
+                                * we've got a hole to fill, so
+                                * move this vm_page_t to its new home
+                                */
+                               vm_pages[compact_target_indx] = *mem;
+                               mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+                               hibernate_teardown_last_valid_compact_indx = compact_target_indx;
+                               compact_target_indx++;
+                       } else
+                               hibernate_teardown_last_valid_compact_indx = i;
+               }
+       }
+       unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
+                                                            (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
+       mark_as_unneeded_pages += unneeded_vm_pages_pages;
+
+       hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
+
+       if (start_of_unneeded) {
+               unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
+               mark_as_unneeded_pages += unneeded_pmap_pages;
+       }
+       HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
+
+       return (mark_as_unneeded_pages);
+}
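
The loop above compacts vm_pages[] in place: free entries are pulled off the
free queues and abandoned, and every surviving entry first stashes its original
location in next_m, then slides down into the lowest free hole, so the pages
backing the tail of the array can be handed to the hibernation image as
unneeded. A self-contained user-space sketch of that compaction (entry_t and
its fields are illustrative stand-ins for the vm_page_t machinery):

    #include <stdio.h>

    #define N 8

    typedef struct {
        int free;     /* stand-in for VM_PAGE_ON_FREE_Q                */
        int orig;     /* stand-in for the self-pointer saved in next_m */
        int payload;
    } entry_t;

    int main(void)
    {
        entry_t e[N] = {
            {1,0,0}, {0,0,10}, {1,0,0}, {0,0,30},
            {0,0,40}, {1,0,0}, {0,0,60}, {1,0,0}
        };
        int target = 0, last_valid = -1;

        for (int i = 0; i < N; i++) {
            if (e[i].free) {
                /* a free entry just leaves a hole; remember the first one */
                if (!e[target].free)
                    target = i;
            } else {
                e[i].orig = i;            /* record original location */
                if (e[target].free) {
                    e[target] = e[i];     /* fill the hole            */
                    e[i].free = 1;        /* old slot becomes a hole  */
                    last_valid = target++;
                } else {
                    last_valid = i;       /* already compact          */
                }
            }
        }
        for (int i = 0; i <= last_valid; i++)
            printf("compact[%d] = payload %d (from index %d)\n",
                   i, e[i].payload, e[i].orig);
        return 0;
    }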
+
+
 #endif /* HIBERNATION */
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -5497,7 +7704,9 @@ vm_page_info(
                bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
                lck_spin_lock(bucket_lock);
 
-               for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+               for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+                    m != VM_PAGE_NULL;
+                    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->next_m)))
                        bucket_count++;
 
                lck_spin_unlock(bucket_lock);
@@ -5509,3 +7718,1258 @@ vm_page_info(
        return vm_page_bucket_count;
 }
 #endif /* MACH_VM_DEBUG */
+
+#if VM_PAGE_BUCKETS_CHECK
+void
+vm_page_buckets_check(void)
+{
+       unsigned int i;
+       vm_page_t p;
+       unsigned int p_hash;
+       vm_page_bucket_t *bucket;
+       lck_spin_t      *bucket_lock;
+
+       if (!vm_page_buckets_check_ready) {
+               return;
+       }
+
+#if HIBERNATION
+       if (hibernate_rebuild_needed ||
+           hibernate_rebuild_hash_list) {
+               panic("BUCKET_CHECK: hibernation in progress: "
+                     "rebuild_needed=%d rebuild_hash_list=%p\n",
+                     hibernate_rebuild_needed,
+                     hibernate_rebuild_hash_list);
+       }
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+       char *cp;
+       for (cp = (char *) vm_page_fake_buckets_start;
+            cp < (char *) vm_page_fake_buckets_end;
+            cp++) {
+               if (*cp != 0x5a) {
+                       panic("BUCKET_CHECK: corruption at %p in fake buckets "
+                             "[0x%llx:0x%llx]\n",
+                             cp,
+                             (uint64_t) vm_page_fake_buckets_start,
+                             (uint64_t) vm_page_fake_buckets_end);
+               }
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+       for (i = 0; i < vm_page_bucket_count; i++) {
+               vm_object_t     p_object;
+
+               bucket = &vm_page_buckets[i];
+               if (!bucket->page_list) {
+                       continue;
+               }
+
+               bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+               lck_spin_lock(bucket_lock);
+               p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+
+               while (p != VM_PAGE_NULL) {
+                       p_object = VM_PAGE_OBJECT(p);
+
+                       if (!p->hashed) {
+                               panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+                                     "hash %d in bucket %d at %p "
+                                     "is not hashed\n",
+                                     p, p_object, p->offset,
+                                     p_hash, i, bucket);
+                       }
+                       p_hash = vm_page_hash(p_object, p->offset);
+                       if (p_hash != i) {
+                               panic("BUCKET_CHECK: corruption in bucket %d "
+                                     "at %p: page %p object %p offset 0x%llx "
+                                     "hash %d\n",
+                                     i, bucket, p, p_object, p->offset,
+                                     p_hash);
+                       }
+                       p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->next_m));
+               }
+               lck_spin_unlock(bucket_lock);
+       }
+
+//     printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
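
vm_page_buckets_check() above enforces one invariant per hashed page:
re-hashing the page's (object, offset) pair must land back on the exact bucket
the walk found it in. A tiny user-space sketch of the same style of hash-table
self-check (the table, hash function and node type are illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define NBUCKETS 8

    typedef struct node {
        struct node *next;
        unsigned     key;          /* stand-in for (object, offset) */
    } node_t;

    static node_t *buckets[NBUCKETS];

    static unsigned hash(unsigned key) { return key % NBUCKETS; }

    static void insert(node_t *n)
    {
        unsigned h = hash(n->key);
        n->next = buckets[h];      /* push-front, as in the buckets above */
        buckets[h] = n;
    }

    /* walk every chain; each node must hash back to its own bucket */
    static void buckets_check(void)
    {
        for (unsigned i = 0; i < NBUCKETS; i++)
            for (node_t *n = buckets[i]; n != NULL; n = n->next)
                assert(hash(n->key) == i && "corrupt bucket chain");
    }

    int main(void)
    {
        node_t a = {0, 3}, b = {0, 11}, c = {0, 5};
        insert(&a); insert(&b); insert(&c);
        buckets_check();
        printf("bucket check passed\n");
        return 0;
    }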
+
+/*
+ * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
+ * local queues if they exist... it's the only spot in the system where we add pages
+ * to those queues...  once on those queues, those pages can only move to one of the
+ * global page queues or the free queues... they NEVER move from local q to local q.
+ * the 'local' state is stable when vm_page_queues_remove is called since we're behind
+ * the global vm_page_queue_lock at this point...  we still need to take the local lock
+ * in case this operation is being run on a different CPU than the local queue's identity,
+ * but we don't have to worry about the page moving to a global queue or becoming wired
+ * while we're grabbing the local lock since those operations would require the global
+ * vm_page_queue_lock to be held, and we already own it.
+ *
+ * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
+ * 'wired' and local are ALWAYS mutually exclusive conditions.
+ */
+
+#if CONFIG_BACKGROUND_QUEUE
+void
+vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
+#else
+void
+vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
+#endif
+{
+       boolean_t       was_pageable = TRUE;
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+       if (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q)
+       {
+               assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+               if (remove_from_backgroundq == TRUE) {
+                       vm_page_remove_from_backgroundq(mem);
+               }
+               if (mem->vm_page_on_backgroundq) {
+                       assert(mem->vm_page_backgroundq.next != 0);
+                       assert(mem->vm_page_backgroundq.prev != 0);
+               } else {
+                       assert(mem->vm_page_backgroundq.next == 0);
+                       assert(mem->vm_page_backgroundq.prev == 0);
+               }
+#endif /* CONFIG_BACKGROUND_QUEUE */
+               return;
+       }
+
+       if (mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
+       {
+               assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+               assert(mem->vm_page_backgroundq.next == 0 &&
+                      mem->vm_page_backgroundq.prev == 0 &&
+                      mem->vm_page_on_backgroundq == FALSE);
+#endif
+               return;
+       }
+       if (mem->vm_page_q_state == VM_PAGE_IS_WIRED) {
+               /*
+                * might put these guys on a list for debugging purposes
+                * if we do, we'll need to remove this assert
+                */
+               assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+               assert(mem->vm_page_backgroundq.next == 0 &&
+                      mem->vm_page_backgroundq.prev == 0 &&
+                      mem->vm_page_on_backgroundq == FALSE);
+#endif
+               return;
+       }
+
+       assert(m_object != compressor_object);
+       assert(m_object != kernel_object);
+       assert(m_object != vm_submap_object);
+       assert(!mem->fictitious);
+
+       switch(mem->vm_page_q_state) {
+               
+       case VM_PAGE_ON_ACTIVE_LOCAL_Q:
+       {
+               struct vpl      *lq;
+
+               lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;
+               VPL_LOCK(&lq->vpl_lock);
+               vm_page_queue_remove(&lq->vpl_queue,
+                                    mem, vm_page_t, pageq);
+               mem->local_id = 0;
+               lq->vpl_count--;
+               if (m_object->internal) {
+                       lq->vpl_internal_count--;
+               } else {
+                       lq->vpl_external_count--;
+               }
+               VPL_UNLOCK(&lq->vpl_lock);
+               was_pageable = FALSE;
+               break;
+       }
+       case VM_PAGE_ON_ACTIVE_Q:
+       {
+               vm_page_queue_remove(&vm_page_queue_active,
+                                    mem, vm_page_t, pageq);
+               vm_page_active_count--;
+               break;
+       }
+
+       case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
+       {
+               assert(m_object->internal == TRUE);
+
+               vm_page_inactive_count--;
+               vm_page_queue_remove(&vm_page_queue_anonymous,
+                                    mem, vm_page_t, pageq);
+               vm_page_anonymous_count--;
+               vm_purgeable_q_advance_all();
+               break;
+       }
+
+       case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
+       {
+               assert(m_object->internal == FALSE);
+
+               vm_page_inactive_count--;
+               vm_page_queue_remove(&vm_page_queue_inactive,
+                                    mem, vm_page_t, pageq);
+               vm_purgeable_q_advance_all();
+               break;
+       }
+
+       case VM_PAGE_ON_INACTIVE_CLEANED_Q:
+       {
+               assert(m_object->internal == FALSE);
+
+               vm_page_inactive_count--;
+               vm_page_queue_remove(&vm_page_queue_cleaned,
+                                    mem, vm_page_t, pageq);
+               vm_page_cleaned_count--;
+               break;
+       }
+
+       case VM_PAGE_ON_THROTTLED_Q:
+       {
+               assert(m_object->internal == TRUE);
+
+               vm_page_queue_remove(&vm_page_queue_throttled,
+                                    mem, vm_page_t, pageq);
+               vm_page_throttled_count--;
+               was_pageable = FALSE;
+               break;
+       }
+
+       case VM_PAGE_ON_SPECULATIVE_Q:
+       {
+               assert(m_object->internal == FALSE);
+
+               vm_page_remque(&mem->pageq);
+               vm_page_speculative_count--;
+               break;
+       }
+
+#if CONFIG_SECLUDED_MEMORY
+       case VM_PAGE_ON_SECLUDED_Q:
+       {
+               vm_page_queue_remove(&vm_page_queue_secluded,
+                                    mem, vm_page_t, pageq);
+               vm_page_secluded_count--;
+               if (m_object == VM_OBJECT_NULL) {
+                       vm_page_secluded_count_free--;
+                       was_pageable = FALSE;
+               } else {
+                       assert(!m_object->internal);
+                       vm_page_secluded_count_inuse--;
+                       was_pageable = FALSE;
+//                     was_pageable = TRUE;
+               }
+               break;
+       }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+       default:
+       {
+               /*
+                *      if (mem->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)
+                *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
+                *              the caller is responsible for determining if the page is on that queue, and if so, must
+                *              either first remove it (it needs both the page queues lock and the object lock to do
+                *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
+                *
+                *      we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
+                *      or any of the undefined states
+                */
+               panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vm_page_q_state);
+               break;
+       }
+
+       }
+       VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+       mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+
+#if CONFIG_BACKGROUND_QUEUE
+       if (remove_from_backgroundq == TRUE)
+               vm_page_remove_from_backgroundq(mem);
+#endif
+       if (was_pageable) {
+               if (m_object->internal) {
+                       vm_page_pageable_internal_count--;
+               } else {
+                       vm_page_pageable_external_count--;
+               }
+       }
+}
+
+void
+vm_page_remove_internal(vm_page_t page)
+{
+       vm_object_t __object = VM_PAGE_OBJECT(page);
+       if (page == __object->memq_hint) {
+               vm_page_t       __new_hint;
+               vm_page_queue_entry_t   __qe;
+               __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->listq);
+               if (vm_page_queue_end(&__object->memq, __qe)) {
+                       __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->listq);
+                       if (vm_page_queue_end(&__object->memq, __qe)) {
+                               __qe = NULL;
+                       }
+               }
+               __new_hint = (vm_page_t)((uintptr_t) __qe);
+               __object->memq_hint = __new_hint;
+       }
+       vm_page_queue_remove(&__object->memq, page, vm_page_t, listq);
+#if CONFIG_SECLUDED_MEMORY
+       if (__object->eligible_for_secluded) {
+               vm_page_secluded.eligible_for_secluded--;
+       }
+#endif /* CONFIG_SECLUDED_MEMORY */
+}
+
+void
+vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
+{
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       assert(!mem->fictitious);
+       assert(!mem->laundry);
+       assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+       vm_page_check_pageable_safe(mem);
+
+       if (m_object->internal) {
+               mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
+
+               if (first == TRUE)
+                       vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+               else
+                       vm_page_queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+
+               vm_page_anonymous_count++;
+               vm_page_pageable_internal_count++;
+       } else {
+               mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
+
+               if (first == TRUE)
+                       vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+               else
+                       vm_page_queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+
+               vm_page_pageable_external_count++;
+       }
+       vm_page_inactive_count++;
+       token_new_pagecount++;
+
+#if CONFIG_BACKGROUND_QUEUE
+       if (mem->vm_page_in_background)
+               vm_page_add_to_backgroundq(mem, FALSE);
+#endif
+}
+
+void
+vm_page_enqueue_active(vm_page_t mem, boolean_t first)
+{
+       vm_object_t     m_object;
+
+       m_object = VM_PAGE_OBJECT(mem);
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       assert(!mem->fictitious);
+       assert(!mem->laundry);
+       assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+       vm_page_check_pageable_safe(mem);
+
+       mem->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
+       if (first == TRUE)
+               vm_page_queue_enter_first(&vm_page_queue_active, mem, vm_page_t, pageq);
+       else
+               vm_page_queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
+       vm_page_active_count++;
+
+       if (m_object->internal) {
+               vm_page_pageable_internal_count++;
+       } else {
+               vm_page_pageable_external_count++;
+       }
+
+#if CONFIG_BACKGROUND_QUEUE
+       if (mem->vm_page_in_background)
+               vm_page_add_to_backgroundq(mem, FALSE);
+#endif
+}
+
+/*
+ * Pages from special kernel objects shouldn't
+ * be placed on pageable queues.
+ */
+void
+vm_page_check_pageable_safe(vm_page_t page)
+{
+       vm_object_t     page_object;
+
+       page_object = VM_PAGE_OBJECT(page);
+
+       if (page_object == kernel_object) {
+               panic("vm_page_check_pageable_safe: trying to add page "
+                     "from kernel object (%p) to pageable queue", kernel_object);
+       }
+
+       if (page_object == compressor_object) {
+               panic("vm_page_check_pageable_safe: trying to add page "
+                     "from compressor object (%p) to pageable queue", compressor_object);
+       }
+
+       if (page_object == vm_submap_object) {
+               panic("vm_page_check_pageable_safe: trying to add page "
+                     "from submap object (%p) to pageable queue", vm_submap_object);
+       }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * wired page diagnose
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <libkern/OSKextLibPrivate.h>
+
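+/*
+ * Overview: allocation sites are assigned small vm_tag_t indices
+ * (vm_tag_alloc), per-tag totals are updated as wired memory comes and goes
+ * (vm_tag_update_size / vm_tag_update_zone_size), and vm_page_diagnose()
+ * snapshots the results into a mach_memory_info_t array for user-level
+ * diagnostic tools.
+ */
+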
+#define KA_SIZE(namelen, subtotalscount)       \
+       (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
+
+#define KA_NAME(alloc) \
+       ((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
+
+#define KA_NAME_LEN(alloc)     \
+    (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
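+
+/*
+ * These macros assume a vm_allocation_site is allocated as one variable-
+ * length block: the fixed struct, followed by `subtotalscount` entries of
+ * struct vm_allocation_total, followed by the NUL-terminated site name.
+ */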
+
+vm_tag_t 
+vm_tag_bt(void)
+{
+    uintptr_t* frameptr;
+    uintptr_t* frameptr_next;
+    uintptr_t retaddr;
+    uintptr_t kstackb, kstackt;
+    const vm_allocation_site_t * site;
+    thread_t cthread;
+    kern_allocation_name_t name;
+    
+    cthread = current_thread();
+    if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;
+
+    if ((name = thread_get_kernel_state(cthread)->allocation_name))
+    {
+       if (!name->tag) vm_tag_alloc(name);
+       return name->tag;
+    }
+
+    kstackb = cthread->kernel_stack;
+    kstackt = kstackb + kernel_stack_size;
+
+    /* Load stack frame pointer (EBP on x86) into frameptr */
+    frameptr = __builtin_frame_address(0);
+    site = NULL;
+    while (frameptr != NULL) 
+    {
+       /* Verify thread stack bounds */
+       if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;
+
+       /* Next frame pointer is pointed to by the previous one */
+       frameptr_next = (uintptr_t*) *frameptr;
+
+       /* Pull return address from one spot above the frame pointer */
+       retaddr = *(frameptr + 1);
+
+
+       if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
+       {
+               site = OSKextGetAllocationSiteForCaller(retaddr);
+               break;
+       }
+       frameptr = frameptr_next;
+    }
+
+    return (site ? site->tag : VM_KERN_MEMORY_NONE);
+}
+
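+/*
+ * Bitmap of free dynamic tags: tag `t` is free when bit (63 - (t & 63)) of
+ * free_tag_bits[t >> 6] is set.  Keeping the lowest tag number in the
+ * highest bit lets vm_tag_alloc_locked() locate the lowest free tag with a
+ * single __builtin_clzll().
+ */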
+static uint64_t free_tag_bits[VM_MAX_TAG_VALUE/64];
+
+void
+vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
+{
+    vm_tag_t tag;
+    uint64_t avail;
+    uint32_t idx;
+    vm_allocation_site_t * prev;
+
+    if (site->tag) return;
+
+    idx = 0;
+    while (TRUE)
+    {
+               avail = free_tag_bits[idx];
+               if (avail)
+               {
+                   tag = __builtin_clzll(avail);
+                   avail &= ~(1ULL << (63 - tag));
+                   free_tag_bits[idx] = avail;
+                   tag += (idx << 6);
+                   break;
+               }
+               idx++;
+               if (idx >= ARRAY_COUNT(free_tag_bits))
+               {
+                   for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++)
+                   {
+                               prev = vm_allocation_sites[idx];
+                               if (!prev)               continue;
+                               if (!KA_NAME_LEN(prev))  continue;
+                               if (!prev->tag)          continue;
+                               if (prev->total)         continue;
+                               if (1 != prev->refcount) continue;
+
+                               assert(idx == prev->tag);
+                               tag = idx;
+                               prev->tag = VM_KERN_MEMORY_NONE;
+                               *releasesiteP = prev;
+                               break;
+                   }
+                   if (idx >= ARRAY_COUNT(vm_allocation_sites))
+                       {
+                               tag = VM_KERN_MEMORY_ANY;
+                       }
+                   break;
+               }
+    }
+    site->tag = tag;
+
+    OSAddAtomic16(1, &site->refcount);
+
+    if (VM_KERN_MEMORY_ANY != tag) vm_allocation_sites[tag] = site;
+
+    if (tag > vm_allocation_tag_highest) vm_allocation_tag_highest = tag;
+}
+
+static void
+vm_tag_free_locked(vm_tag_t tag)
+{
+    uint64_t avail;
+    uint32_t idx;
+    uint64_t bit;
+
+    if (VM_KERN_MEMORY_ANY == tag) return;
+
+    idx = (tag >> 6);
+    avail = free_tag_bits[idx];
+    tag &= 63;
+    bit = (1ULL << (63 - tag));
+    assert(!(avail & bit));
+    free_tag_bits[idx] = (avail | bit);
+}
+
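+/*
+ * Mark all dynamic tags free at startup.  VM_KERN_MEMORY_ANY is skipped:
+ * it is the overflow value handed out once the tag space is exhausted.
+ */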
+static void
+vm_tag_init(void)
+{
+    vm_tag_t tag;
+    for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
+    {
+        vm_tag_free_locked(tag);
+    }
+
+    for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++)
+    {
+        vm_tag_free_locked(tag);
+    }
+}
+
+vm_tag_t
+vm_tag_alloc(vm_allocation_site_t * site)
+{
+    vm_tag_t tag;
+    vm_allocation_site_t * releasesite;
+
+    if (VM_TAG_BT & site->flags)
+    {
+               tag = vm_tag_bt();
+               if (VM_KERN_MEMORY_NONE != tag) return (tag);
+    }
+
+    if (!site->tag) 
+    {
+               releasesite = NULL;
+               lck_spin_lock(&vm_allocation_sites_lock);
+               vm_tag_alloc_locked(site, &releasesite);
+               lck_spin_unlock(&vm_allocation_sites_lock);
+        if (releasesite) kern_allocation_name_release(releasesite);
+    }
+
+    return (site->tag);
+}
+
+void
+vm_tag_update_size(vm_tag_t tag, int64_t delta)
+{
+    vm_allocation_site_t * allocation;
+    uint64_t prior;
+
+    assert(VM_KERN_MEMORY_NONE != tag);
+    assert(tag < VM_MAX_TAG_VALUE);
+
+    allocation = vm_allocation_sites[tag];
+    assert(allocation);
+
+    if (delta < 0) {
+               assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation);
+    }
+    prior = OSAddAtomic64(delta, &allocation->total);
+
+#if DEBUG || DEVELOPMENT
+
+    uint64_t new, peak;
+    new = prior + delta;
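+    /*
+     * Lock-free peak update: retry until the recorded peak already covers
+     * `new`, or until our compare-and-swap publishes it.
+     */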
+    do
+    {
+        peak = allocation->peak;
+        if (new <= peak) break;
+    }
+    while (!OSCompareAndSwap64(peak, new, &allocation->peak));
+
+#endif /* DEBUG || DEVELOPMENT */
+
+    if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) return;
+
+    if (!prior && !allocation->tag) vm_tag_alloc(allocation);
+}
+
+void
+kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
+{
+    uint64_t prior;
+
+    if (delta < 0) {
+               assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
+    }
+    prior = OSAddAtomic64(delta, &allocation->total);
+
+#if DEBUG || DEVELOPMENT
+
+    uint64_t new, peak;
+    new = prior + delta;
+    do
+    {
+        peak = allocation->peak;
+        if (new <= peak) break;
+    }
+    while (!OSCompareAndSwap64(peak, new, &allocation->peak));
+
+#endif /* DEBUG || DEVELOPMENT */
+
+    if (!prior && !allocation->tag) vm_tag_alloc(allocation);
+}
+
+#if VM_MAX_TAG_ZONES
+
+void
+vm_allocation_zones_init(void)
+{
+    kern_return_t ret;
+    vm_offset_t   addr;
+    vm_size_t     size;
+
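+    // one pointer-table slot per tag, plus two preallocated per-zone total
+    // arrays (assigned to VM_KERN_MEMORY_DIAG and VM_KERN_MEMORY_KALLOC below)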
+    size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **)
+         + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
+
+       ret = kernel_memory_allocate(kernel_map,
+                                       &addr, round_page(size), 0,
+                                       KMA_ZERO, VM_KERN_MEMORY_DIAG);
+    assert(KERN_SUCCESS == ret);
+
+    vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
+    addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **);
+
+    // prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations
+    // in vm_tag_update_zone_size() won't recurse
+    vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG]   = (vm_allocation_zone_total_t *) addr;
+    addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
+    vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr;
+}
+
+void
+vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx)
+{
+    vm_allocation_zone_total_t * zone;
+
+    assert(VM_KERN_MEMORY_NONE != tag);
+    assert(tag < VM_MAX_TAG_VALUE);
+
+    if (zidx >= VM_MAX_TAG_ZONES) return;
+
+       zone = vm_allocation_zone_totals[tag];
+    if (!zone)
+    {
+        zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG);
+        if (!zone) return;
+        bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
+        if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag]))
+               {
+                       kfree(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
+               }
+    }
+}
+
+void
+vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste)
+{
+    vm_allocation_zone_total_t * zone;
+    uint32_t new;
+
+    assert(VM_KERN_MEMORY_NONE != tag);
+    assert(tag < VM_MAX_TAG_VALUE);
+
+    if (zidx >= VM_MAX_TAG_ZONES) return;
+
+       zone = vm_allocation_zone_totals[tag];
+    assert(zone);
+    zone += zidx;
+
+    /* the zone is locked */
+    if (delta < 0)
+    {
+               assertf(zone->total >= ((uint64_t)-delta), "zidx %d, tag %d, %p", zidx, tag, zone);
+           zone->total += delta;
+    }
+    else
+    {
+               zone->total += delta;
+               if (zone->total > zone->peak) zone->peak = zone->total;
+        if (dwaste)
+        {
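+                       /*
+                        * Decaying average of allocation waste: `waste`
+                        * accumulates samples while `wastediv` counts them,
+                        * saturating at 65536, after which old samples decay
+                        * so waste/wastediv tracks recent allocations.
+                        */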
+                       new = zone->waste;
+                       if (zone->wastediv < 65536) zone->wastediv++;
+                       else                        new -= (new >> 16);
+                       __assert_only bool ov = os_add_overflow(new, dwaste, &new);
+                       assert(!ov);
+                       zone->waste = new;
+        }
+    }
+}
+
+#endif /* VM_MAX_TAG_ZONES */
+
+void
+kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
+{
+    kern_allocation_name_t other;
+       struct vm_allocation_total * total;
+    uint32_t subidx;
+
+    subidx = 0;
+    assert(VM_KERN_MEMORY_NONE != subtag);
+       for (; subidx < allocation->subtotalscount; subidx++)
+       {
+               if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag)
+               {
+                       allocation->subtotals[subidx].tag = subtag;
+                       break;
+               }
+               if (subtag == allocation->subtotals[subidx].tag) break;
+       }
+    assert(subidx < allocation->subtotalscount);
+    if (subidx >= allocation->subtotalscount) return;
+
+    total = &allocation->subtotals[subidx];
+    other = vm_allocation_sites[subtag];
+    assert(other);
+
+    if (delta < 0)
+    {
+               assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
+           OSAddAtomic64(delta, &total->total);
+               assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
+        OSAddAtomic64(delta, &other->mapped);
+    }
+    else
+    {
+        OSAddAtomic64(delta, &other->mapped);
+           OSAddAtomic64(delta, &total->total);
+    }
+}
+
+const char *
+kern_allocation_get_name(kern_allocation_name_t allocation)
+{
+    return (KA_NAME(allocation));
+}
+
+kern_allocation_name_t
+kern_allocation_name_allocate(const char * name, uint32_t subtotalscount)
+{
+    uint32_t namelen;
+
+    namelen = (uint32_t) strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
+
+    kern_allocation_name_t allocation;
+    allocation = kalloc(KA_SIZE(namelen, subtotalscount));
+    bzero(allocation, KA_SIZE(namelen, subtotalscount));
+
+    allocation->refcount       = 1;
+    allocation->subtotalscount = subtotalscount;
+    allocation->flags          = (namelen << VM_TAG_NAME_LEN_SHIFT);
+    strlcpy(KA_NAME(allocation), name, namelen + 1);
+
+    return (allocation);
+}
+
+void
+kern_allocation_name_release(kern_allocation_name_t allocation)
+{
+    assert(allocation->refcount > 0);
+    if (1 == OSAddAtomic16(-1, &allocation->refcount))
+    {
+        kfree(allocation, KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
+    }
+}
+
+vm_tag_t
+kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
+{
+    return (vm_tag_alloc(allocation));
+}
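+
+/*
+ * Illustrative lifecycle of a named allocation counter (the subsystem name
+ * here is hypothetical, with zero subtotals):
+ *
+ *     kern_allocation_name_t name;
+ *     name = kern_allocation_name_allocate("com.example.mysub", 0);
+ *     kern_allocation_update_size(name, ptoa_64(1));   // account one page
+ *     kern_allocation_update_size(name, -ptoa_64(1));  // ... and release it
+ *     kern_allocation_name_release(name);
+ */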
+
+static void 
+vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
+{
+    if (!object->wired_page_count) return;
+    if (object != kernel_object)
+    {
+               assert(object->wire_tag < num_info);
+               info[object->wire_tag].size += ptoa_64(object->wired_page_count);
+    }
+}
+
+typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
+                                    unsigned int num_info, vm_object_t object);
+
+static void 
+vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
+                                 vm_page_iterate_proc proc, purgeable_q_t queue, 
+                                 int group)
+{
+    vm_object_t object;
+
+    for (object = (vm_object_t) queue_first(&queue->objq[group]);
+               !queue_end(&queue->objq[group], (queue_entry_t) object);
+               object = (vm_object_t) queue_next(&object->objq))
+    {
+               proc(info, num_info, object);
+    }
+}
+
+static void 
+vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
+                       vm_page_iterate_proc proc)
+{
+    purgeable_q_t   volatile_q;
+    queue_head_t  * nonvolatile_q;
+    vm_object_t     object;
+    int             group;
+
+    lck_spin_lock(&vm_objects_wired_lock);
+    queue_iterate(&vm_objects_wired,
+                 object,
+                 vm_object_t,
+                 objq)
+    {
+               proc(info, num_info, object);
+    }
+    lck_spin_unlock(&vm_objects_wired_lock);
+
+    lck_mtx_lock(&vm_purgeable_queue_lock);
+    nonvolatile_q = &purgeable_nonvolatile_queue;
+    for (object = (vm_object_t) queue_first(nonvolatile_q);
+                !queue_end(nonvolatile_q, (queue_entry_t) object);
+                object = (vm_object_t) queue_next(&object->objq))
+    {
+               proc(info, num_info, object);
+    }
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+    vm_page_iterate_purgeable_objects(info, num_info, proc, volatile_q, 0);
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+    {
+               vm_page_iterate_purgeable_objects(info, num_info, proc, volatile_q, group);
+    }
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+    {
+               vm_page_iterate_purgeable_objects(info, num_info, proc, volatile_q, group);
+    }
+    lck_mtx_unlock(&vm_purgeable_queue_lock);
+}
+
+static uint64_t
+process_account(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, boolean_t iterated)
+{
+    size_t                 namelen;
+    unsigned int           idx, count, nextinfo;
+    vm_allocation_site_t * site;
+       lck_spin_lock(&vm_allocation_sites_lock);
+
+       for (idx = 0; idx <= vm_allocation_tag_highest; idx++)
+    {
+               site = vm_allocation_sites[idx];
+               if (!site) continue;
+               info[idx].mapped = site->mapped;
+               info[idx].tag    = site->tag;
+        if (!iterated)
+        {
+                       info[idx].size = site->total;
+#if DEBUG || DEVELOPMENT
+                       info[idx].peak = site->peak;
+#endif /* DEBUG || DEVELOPMENT */
+        }
+        else
+        {
+                       if (!site->subtotalscount && (site->total != info[idx].size))
+                       {
+                           printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
+                           info[idx].size = site->total;
+                       }
+        }
+    }
+
+    nextinfo = (vm_allocation_tag_highest + 1);
+    count    = nextinfo;
+    if (count >= num_info) count = num_info;
+
+    for (idx = 0; idx < count; idx++)
+    {
+               site = vm_allocation_sites[idx];
+               if (!site) continue;
+               info[idx].flags |= VM_KERN_SITE_WIRED;
+               if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
+               {
+                   info[idx].site   = idx;
+                   info[idx].flags |= VM_KERN_SITE_TAG;
+                   if (VM_KERN_MEMORY_ZONE == idx)
+                   {
+                               info[idx].flags |= VM_KERN_SITE_HIDE;
+                               info[idx].flags &= ~VM_KERN_SITE_WIRED;
+                               info[idx].collectable_bytes = zones_collectable_bytes;
+                       }
+               }
+               else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT))))
+               {
+                   info[idx].site   = 0;
+                   info[idx].flags |= VM_KERN_SITE_NAMED;
+                   if (namelen > sizeof(info[idx].name)) namelen = sizeof(info[idx].name);
+                   strncpy(&info[idx].name[0], KA_NAME(site), namelen);
+               }
+               else if (VM_TAG_KMOD & site->flags)
+               {
+                   info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
+                   info[idx].flags |= VM_KERN_SITE_KMOD;
+               }
+               else
+               {
+                   info[idx].site   = VM_KERNEL_UNSLIDE(site);
+                   info[idx].flags |= VM_KERN_SITE_KERNEL;
+               }
+#if VM_MAX_TAG_ZONES
+               vm_allocation_zone_total_t * zone;
+               unsigned int                 zidx;
+               vm_size_t                    elem_size;
+
+        if (vm_allocation_zone_totals
+                       && (zone = vm_allocation_zone_totals[idx])
+                       && (nextinfo < num_info))
+        {
+            for (zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++)
+            {
+                if (!zone[zidx].peak)              continue;
+                               info[nextinfo]                   = info[idx];
+                               info[nextinfo].zone              = zone_index_from_tag_index(zidx, &elem_size);
+                               info[nextinfo].flags            &= ~VM_KERN_SITE_WIRED;
+                               info[nextinfo].flags            |= VM_KERN_SITE_ZONE;
+                               info[nextinfo].size              = zone[zidx].total;
+                               info[nextinfo].peak              = zone[zidx].peak;
+                               info[nextinfo].mapped            = 0;
+                if (zone[zidx].wastediv)
+                               {
+                                       info[nextinfo].collectable_bytes = ((zone[zidx].waste * zone[zidx].total / elem_size) / zone[zidx].wastediv);
+                               }
+                nextinfo++;
+             }
+        }
+#endif /* VM_MAX_TAG_ZONES */
+        if (site->subtotalscount)
+        {
+                       uint64_t mapped, mapcost, take;
+                       uint32_t sub;
+                       vm_tag_t alloctag;
+
+            info[idx].size = site->total;
+            mapped = info[idx].size;
+            info[idx].mapped = mapped;
+            mapcost = 0;
+            for (sub = 0; sub < site->subtotalscount; sub++)
+            {
+                               alloctag = site->subtotals[sub].tag;
+                               assert(alloctag < num_info);
+                               if (info[alloctag].name[0]) continue;
+                               take = info[alloctag].mapped;
+                               if (take > info[alloctag].size) take = info[alloctag].size;
+                               if (take > mapped) take = mapped;
+                               info[alloctag].mapped  -= take;
+                               info[alloctag].size    -= take;
+                               mapped                 -= take;
+                               mapcost                += take;
+            }
+            info[idx].size = mapcost;
+        }
+       }
+       lck_spin_unlock(&vm_allocation_sites_lock);
+
+    return (0);
+}
+
+uint32_t
+vm_page_diagnose_estimate(void)
+{
+    vm_allocation_site_t * site;
+    uint32_t               count;
+    uint32_t               idx;
+
+       lck_spin_lock(&vm_allocation_sites_lock);
+       for (count = idx = 0; idx < VM_MAX_TAG_VALUE; idx++)
+    {
+               site = vm_allocation_sites[idx];
+               if (!site) continue;
+               count++;
+#if VM_MAX_TAG_ZONES
+               if (vm_allocation_zone_totals)
+               {
+                       vm_allocation_zone_total_t * zone;
+                       zone = vm_allocation_zone_totals[idx];
+                       if (!zone) continue;
+                       for (uint32_t zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) if (zone[zidx].peak) count++;
+               }
+#endif
+    }
+       lck_spin_unlock(&vm_allocation_sites_lock);
+
+    /* some slop for new tags created */
+    count += 8;
+    count += VM_KERN_COUNTER_COUNT;
+
+    return (count);
+}
+
+
+kern_return_t 
+vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes)
+{
+    uint64_t                wired_size;
+    uint64_t                wired_managed_size;
+    uint64_t                wired_reserved_size;
+    uint64_t                booter_size;
+    boolean_t                iterate;
+    mach_memory_info_t     * counts;
+
+    bzero(info, num_info * sizeof(mach_memory_info_t));
+
+    if (!vm_page_wire_count_initial) return (KERN_ABORTED);
+
+#if CONFIG_EMBEDDED
+    wired_size         = ptoa_64(vm_page_wire_count);
+    wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
+#else
+    wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
+    wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
+#endif
+    wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
+
+    booter_size = ml_get_booter_memory_size();
+    wired_size += booter_size;
+
+    assert(num_info >= VM_KERN_COUNTER_COUNT);
+    num_info -= VM_KERN_COUNTER_COUNT;
+    counts = &info[num_info];
+
+#define SET_COUNT(xcount, xsize, xflags)                       \
+    counts[xcount].tag   = VM_MAX_TAG_VALUE + xcount;   \
+    counts[xcount].site  = (xcount);                           \
+    counts[xcount].size  = (xsize);                                \
+    counts[xcount].mapped  = (xsize);                          \
+    counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
+
+    SET_COUNT(VM_KERN_COUNT_MANAGED,             ptoa_64(vm_page_pages),        0);
+    SET_COUNT(VM_KERN_COUNT_WIRED,               wired_size,                    0);
+    SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED,       wired_managed_size,            0);
+    SET_COUNT(VM_KERN_COUNT_RESERVED,            wired_reserved_size,           VM_KERN_SITE_WIRED);
+    SET_COUNT(VM_KERN_COUNT_STOLEN,              ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
+    SET_COUNT(VM_KERN_COUNT_LOPAGE,              ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
+    SET_COUNT(VM_KERN_COUNT_WIRED_BOOT,                  ptoa_64(vm_page_wire_count_on_boot), 0);
+    SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN,         booter_size,                   VM_KERN_SITE_WIRED);
+
+#define SET_MAP(xcount, xsize, xfree, xlargest)        \
+    counts[xcount].site    = (xcount);                 \
+    counts[xcount].size    = (xsize);                  \
+    counts[xcount].mapped  = (xsize);                  \
+    counts[xcount].free    = (xfree);                  \
+    counts[xcount].largest = (xlargest);               \
+    counts[xcount].flags   = VM_KERN_SITE_COUNTER;
+
+    vm_map_size_t map_size, map_free, map_largest;
+
+    vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
+
+    vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
+
+    vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);
+
+    iterate = !VM_TAG_ACTIVE_UPDATE;
+    if (iterate)
+       {
+               enum                       { kMaxKernelDepth = 1 };
+               vm_map_t                     maps   [kMaxKernelDepth];
+               vm_map_entry_t               entries[kMaxKernelDepth];
+               vm_map_t                     map;
+               vm_map_entry_t               entry;
+               vm_object_offset_t           offset;
+               vm_page_t                    page;
+               int                          stackIdx, count;
+
+           vm_page_iterate_objects(info, num_info, &vm_page_count_object);
+
+           map = kernel_map;
+           stackIdx = 0;
+           while (map)
+           {
+                       vm_map_lock(map);
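+                       /*
+                        * note: the loop condition tests `map`, not `entry`;
+                        * the inner while() below unlocks and clears `map`
+                        * once the last entry of the outermost map is done.
+                        */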
+                       for (entry = map->hdr.links.next; map; entry = entry->links.next)
+                       {
+                           if (entry->is_sub_map)
+                           {
+                                       assert(stackIdx < kMaxKernelDepth);
+                                       maps[stackIdx] = map;
+                                       entries[stackIdx] = entry;
+                                       stackIdx++;
+                                       map = VME_SUBMAP(entry);
+                                       entry = NULL;
+                                       break;
+                           }
+                           if (VME_OBJECT(entry) == kernel_object)
+                           {
+                                       count = 0;
+                                       vm_object_lock(VME_OBJECT(entry));
+                                       for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
+                                       {
+                                               page = vm_page_lookup(VME_OBJECT(entry), offset);
+                                               if (page && VM_PAGE_WIRED(page)) count++;
+                                       }
+                                       vm_object_unlock(VME_OBJECT(entry));
+
+                                       if (count)
+                                       {
+                                           assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
+                                           assert(VME_ALIAS(entry) < num_info);
+                                           info[VME_ALIAS(entry)].size += ptoa_64(count);
+                                       }
+                           }
+                           while (map && (entry == vm_map_last_entry(map)))
+                           {
+                                       vm_map_unlock(map);
+                                       if (!stackIdx) map = NULL;
+                                       else
+                                       {
+                                           --stackIdx;
+                                           map = maps[stackIdx];
+                                           entry = entries[stackIdx];
+                                       }
+                           }
+                       }
+           }
+    }
+
+    process_account(info, num_info, zones_collectable_bytes, iterate);
+    
+    return (KERN_SUCCESS);
+}
+
+#if DEBUG || DEVELOPMENT
+
+kern_return_t
+vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
+{
+    kern_return_t  ret;
+    vm_size_t      zsize;
+    vm_map_t       map;
+    vm_map_entry_t entry;
+
+    zsize = zone_element_info((void *) addr, tag);
+    if (zsize)
+    {
+               *zone_size = *size = zsize;
+               return (KERN_SUCCESS);
+    }
+
+       *zone_size = 0;
+    ret = KERN_INVALID_ADDRESS;
+    for (map = kernel_map; map; )
+       {
+               vm_map_lock(map);
+               if (!vm_map_lookup_entry(map, addr, &entry)) break;
+           if (entry->is_sub_map)
+           {
+            if (map != kernel_map)                   break;
+                       map = VME_SUBMAP(entry);
+                       continue;
+           }
+               if (entry->vme_start != addr)                break;
+               *tag = VME_ALIAS(entry);
+               *size = (entry->vme_end - addr);
+               ret = KERN_SUCCESS;
+               break;
+       }
+       if (map != kernel_map) vm_map_unlock(map);
+       vm_map_unlock(kernel_map);
+
+       return (ret);
+}
+
+#endif /* DEBUG || DEVELOPMENT */
+
+uint32_t
+vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
+{
+    vm_allocation_site_t * site;
+    uint32_t               kmodId;
+
+    kmodId = 0;
+    lck_spin_lock(&vm_allocation_sites_lock);
+    if ((site = vm_allocation_sites[tag]))
+    {
+        if (VM_TAG_KMOD & site->flags)
+        {
+            kmodId = OSKextGetKmodIDForSite(site, name, namelen);
+        }
+    }
+    lck_spin_unlock(&vm_allocation_sites_lock);
+
+    return (kmodId);
+}