diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c
index ca2ad7744586919144ec3b9b7f7e035b43cda7bd..25c1edb266ec449eae17e8cb2f0579ba9956b958 100644
--- a/osfmk/vm/vm_resident.c
+++ b/osfmk/vm/vm_resident.c
@@ -1,14 +1,19 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -18,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
 
 #include <debug.h>
+#include <libkern/OSAtomic.h>
+#include <libkern/OSDebug.h>
 
 #include <mach/clock_types.h>
 #include <mach/vm_prot.h>
 #include <mach/vm_statistics.h>
+#include <mach/sdt.h>
 #include <kern/counters.h>
 #include <kern/sched_prim.h>
 #include <kern/task.h>
 #include <kern/thread.h>
+#include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/xpr.h>
+#include <kern/ledger.h>
 #include <vm/pmap.h>
 #include <vm/vm_init.h>
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>                        /* kernel_memory_allocate() */
 #include <kern/misc_protos.h>
 #include <zone_debug.h>
+#include <mach_debug/zone_info.h>
 #include <vm/cpm.h>
-#include <ppc/mappings.h>              /* (BRINGUP) */
-#include <pexpert/pexpert.h>   /* (BRINGUP) */
+#include <pexpert/pexpert.h>
 
 #include <vm/vm_protos.h>
+#include <vm/memory_object.h>
+#include <vm/vm_purgeable_internal.h>
+#include <vm/vm_compressor.h>
 
-/*     Variables used to indicate the relative age of pages in the
- *     inactive list
- */
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
+#endif
+
+#include <IOKit/IOHibernatePrivate.h>
+
+#include <sys/kdebug.h>
+
+boolean_t      hibernate_cleaning_in_progress = FALSE;
+boolean_t      vm_page_free_verify = TRUE;
+
+uint32_t       vm_lopage_free_count = 0;
+uint32_t       vm_lopage_free_limit = 0;
+uint32_t       vm_lopage_lowater    = 0;
+boolean_t      vm_lopage_refill = FALSE;
+boolean_t      vm_lopage_needed = FALSE;
+
+lck_mtx_ext_t  vm_page_queue_lock_ext;
+lck_mtx_ext_t  vm_page_queue_free_lock_ext;
+lck_mtx_ext_t  vm_purgeable_queue_lock_ext;
+
+int            speculative_age_index = 0;
+int            speculative_steal_index = 0;
+struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
+
+
+__private_extern__ void                vm_page_init_lck_grp(void);
+
+static void            vm_page_free_prepare(vm_page_t  page);
+static vm_page_t       vm_page_grab_fictitious_common(ppnum_t phys_addr);
+
+static void vm_tag_init(void);
+
+uint64_t       vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
 
-unsigned int   vm_page_ticket_roll = 0;
-unsigned int   vm_page_ticket = 0;
 /*
  *     Associated with page of user-allocatable memory is a
  *     page structure.
@@ -101,7 +143,7 @@ unsigned int        vm_page_ticket = 0;
 
 vm_offset_t virtual_space_start;
 vm_offset_t virtual_space_end;
-int    vm_page_pages;
+uint32_t       vm_page_pages;
 
 /*
  *     The vm_page_lookup() routine, which provides for fast
@@ -112,22 +154,36 @@ int       vm_page_pages;
  *     or VP, table.]
  */
 typedef struct {
-       vm_page_t       pages;
+       vm_page_packed_t page_list;
 #if    MACH_PAGE_HASH_STATS
        int             cur_count;              /* current count */
        int             hi_count;               /* high water mark */
 #endif /* MACH_PAGE_HASH_STATS */
 } vm_page_bucket_t;
 
+
+#define BUCKETS_PER_LOCK       16
+
 vm_page_bucket_t *vm_page_buckets;             /* Array of buckets */
 unsigned int   vm_page_bucket_count = 0;       /* How big is array? */
 unsigned int   vm_page_hash_mask;              /* Mask for hash function */
 unsigned int   vm_page_hash_shift;             /* Shift for hash function */
-uint32_t               vm_page_bucket_hash;    /* Basic bucket hash */
-decl_simple_lock_data(,vm_page_bucket_lock)
+uint32_t       vm_page_bucket_hash;            /* Basic bucket hash */
+unsigned int   vm_page_bucket_lock_count = 0;          /* How big is array of locks? */
 
-vm_page_t
-vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);
+lck_spin_t     *vm_page_bucket_locks;
+lck_spin_t     vm_objects_wired_lock;
+lck_spin_t     vm_allocation_sites_lock;
+
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets;        /* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+extern int not_in_kdp;
 
 
 #if    MACH_PAGE_HASH_STATS
@@ -178,7 +234,7 @@ hash_debug(void)
  */
 vm_size_t      page_size  = PAGE_SIZE;
 vm_size_t      page_mask  = PAGE_MASK;
-int                    page_shift = PAGE_SHIFT;
+int            page_shift = PAGE_SHIFT;
 
 /*
  *     Resident page structures are initialized from
@@ -190,18 +246,25 @@ int                       page_shift = PAGE_SHIFT;
  */
 struct vm_page vm_page_template;
 
+vm_page_t      vm_pages = VM_PAGE_NULL;
+unsigned int   vm_pages_count = 0;
+ppnum_t                vm_page_lowest = 0;
+
 /*
  *     Resident pages that represent real memory
- *     are allocated from a free list.
+ *     are allocated from a set of free lists,
+ *     one per color.
  */
-vm_page_t      vm_page_queue_free;
-vm_page_t       vm_page_queue_fictitious;
+unsigned int   vm_colors;
+unsigned int    vm_color_mask;                 /* mask is == (vm_colors-1) */
+unsigned int   vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
+unsigned int   vm_free_magazine_refill_limit = 0;
+queue_head_t   vm_page_queue_free[MAX_COLORS];
 unsigned int   vm_page_free_wanted;
+unsigned int   vm_page_free_wanted_privileged;
 unsigned int   vm_page_free_count;
 unsigned int   vm_page_fictitious_count;
 
-unsigned int   vm_page_free_count_minimum;     /* debugging */
-
 /*
  *     Occasionally, the virtual memory system uses
  *     resident page structures that do not refer to
@@ -212,16 +275,37 @@ unsigned int      vm_page_free_count_minimum;     /* debugging */
  *     most other kernel structures are.
  */
 zone_t vm_page_zone;
-decl_mutex_data(,vm_page_alloc_lock)
+vm_locks_array_t vm_page_locks;
+decl_lck_mtx_data(,vm_page_alloc_lock)
+lck_mtx_ext_t vm_page_alloc_lock_ext;
+
 unsigned int io_throttle_zero_fill;
 
+unsigned int   vm_page_local_q_count = 0;
+unsigned int   vm_page_local_q_soft_limit = 250;
+unsigned int   vm_page_local_q_hard_limit = 500;
+struct vplq     *vm_page_local_q = NULL;
+
+/* N.B. Guard and fictitious pages must not
+ * be assigned a zero phys_page value.
+ */
 /*
  *     Fictitious pages don't have a physical address,
  *     but we must initialize phys_page to something.
  *     For debugging, this should be a strange value
  *     that the pmap module can recognize in assertions.
  */
-vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
+ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
+
+/*
+ *     Guard pages are not accessible so they don't
+ *     need a physical address, but we need to enter
+ *     one in the pmap.
+ *     Let's make it recognizable and make sure that
+ *     we don't use a real physical page with that
+ *     physical address.
+ */
+ppnum_t vm_page_guard_addr = (ppnum_t) -2;
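A tiny standalone illustration of these sentinels (is_fictitious/is_guard are hypothetical helpers; the kernel compares against the globals directly): both values sit at the top of the ppnum_t range, and neither is zero, so a page whose phys_page was never set is still distinguishable from either special case.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ppnum_t;

static const ppnum_t fictitious_addr = (ppnum_t) -1;   /* 0xFFFFFFFF */
static const ppnum_t guard_addr      = (ppnum_t) -2;   /* 0xFFFFFFFE */

/* hypothetical predicates; the real code simply compares phys_page
 * against vm_page_fictitious_addr / vm_page_guard_addr */
static bool is_fictitious(ppnum_t pn) { return pn == fictitious_addr; }
static bool is_guard(ppnum_t pn)      { return pn == guard_addr; }

int main(void)
{
        printf("%d %d\n", is_fictitious((ppnum_t) -1), is_guard((ppnum_t) -2));
        return 0;
}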
 
 /*
  *     Resident page structures are also chained on
@@ -229,22 +313,57 @@ vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
  *     system (pageout daemon).  These queues are
  *     defined here, but are shared by the pageout
  *     module.  The inactive queue is broken into 
- *     inactive and zf for convenience as the 
+ *     file backed and anonymous for convenience as the 
  *     pageout daemon often assigns a higher 
- *     affinity to zf pages
+ *     importance to anonymous pages (less likely to pick)
  */
 queue_head_t   vm_page_queue_active;
 queue_head_t   vm_page_queue_inactive;
+queue_head_t   vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
+queue_head_t   vm_page_queue_throttled;
+
+queue_head_t   vm_objects_wired;
+
 unsigned int   vm_page_active_count;
 unsigned int   vm_page_inactive_count;
+unsigned int   vm_page_anonymous_count;
+unsigned int   vm_page_throttled_count;
+unsigned int   vm_page_speculative_count;
+
 unsigned int   vm_page_wire_count;
+unsigned int   vm_page_stolen_count;
+unsigned int   vm_page_wire_count_initial;
+unsigned int   vm_page_pages_initial;
 unsigned int   vm_page_gobble_count = 0;
-unsigned int   vm_page_wire_count_warning = 0;
-unsigned int   vm_page_gobble_count_warning = 0;
+
+#define        VM_PAGE_WIRE_COUNT_WARNING      0
+#define VM_PAGE_GOBBLE_COUNT_WARNING   0
 
 unsigned int   vm_page_purgeable_count = 0; /* # of pages purgeable now */
+unsigned int   vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
 uint64_t       vm_page_purged_count = 0;    /* total count of purged pages */
 
+unsigned int   vm_page_xpmapped_external_count = 0;
+unsigned int   vm_page_external_count = 0;
+unsigned int   vm_page_internal_count = 0;
+unsigned int   vm_page_pageable_external_count = 0;
+unsigned int   vm_page_pageable_internal_count = 0;
+
+#if DEVELOPMENT || DEBUG
+unsigned int   vm_page_speculative_recreated = 0;
+unsigned int   vm_page_speculative_created = 0;
+unsigned int   vm_page_speculative_used = 0;
+#endif
+
+queue_head_t    vm_page_queue_cleaned;
+
+unsigned int   vm_page_cleaned_count = 0;
+unsigned int   vm_pageout_enqueued_cleaned = 0;
+
+uint64_t       max_valid_dma_address = 0xffffffffffffffffULL;
+ppnum_t                max_valid_low_ppnum = 0xffffffff;
+
+
 /*
  *     Several page replacement parameters are also
  *     shared with this module, so that page allocation
@@ -253,9 +372,13 @@ uint64_t   vm_page_purged_count = 0;    /* total count of purged pages */
  */
 unsigned int   vm_page_free_target = 0;
 unsigned int   vm_page_free_min = 0;
+unsigned int   vm_page_throttle_limit = 0;
 unsigned int   vm_page_inactive_target = 0;
+unsigned int   vm_page_anonymous_min = 0;
+unsigned int   vm_page_inactive_min = 0;
 unsigned int   vm_page_free_reserved = 0;
-unsigned int   vm_page_throttled_count = 0;
+unsigned int   vm_page_throttle_count = 0;
+
 
 /*
  *     The VM system has a couple of heuristics for deciding
@@ -267,6 +390,8 @@ unsigned int        vm_page_throttled_count = 0;
 
 boolean_t vm_page_deactivate_hint = TRUE;
 
+struct vm_page_stats_reusable vm_page_stats_reusable;
+       
 /*
  *     vm_set_page_size:
  *
@@ -279,7 +404,9 @@ boolean_t vm_page_deactivate_hint = TRUE;
 void
 vm_set_page_size(void)
 {
-       page_mask = page_size - 1;
+       page_size  = PAGE_SIZE;
+       page_mask  = PAGE_MASK;
+       page_shift = PAGE_SHIFT;
 
        if ((page_mask & page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
@@ -289,6 +416,99 @@ vm_set_page_size(void)
                        break;
 }
 
+#define COLOR_GROUPS_TO_STEAL  4
+
+
+/* Called once during startup, once the cache geometry is known.
+ */
+static void
+vm_page_set_colors( void )
+{
+       unsigned int    n, override;
+       
+       if ( PE_parse_boot_argn("colors", &override, sizeof (override)) )               /* colors specified as a boot-arg? */
+               n = override;   
+       else if ( vm_cache_geometry_colors )                    /* do we know what the cache geometry is? */
+               n = vm_cache_geometry_colors;
+       else    n = DEFAULT_COLORS;                             /* use default if all else fails */
+
+       if ( n == 0 )
+               n = 1;
+       if ( n > MAX_COLORS )
+               n = MAX_COLORS;
+               
+       /* the count must be a power of 2  */
+       if ( ( n & (n - 1)) != 0  )
+               panic("vm_page_set_colors");
+       
+       vm_colors = n;
+       vm_color_mask = n - 1;
+
+       vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
+}
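A minimal standalone sketch of how a freed page can be routed to one of the per-color free queues using the mask computed above; pick_color and the example values are illustrative, not the kernel's code (the free lists are indexed by masking the physical page number, which is why the color count must be a power of two).

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ppnum_t;

/* valid only because the color count is forced to a power of two above */
static unsigned int pick_color(ppnum_t phys_page, unsigned int color_mask)
{
        return phys_page & color_mask;
}

int main(void)
{
        unsigned int colors = 8;                /* assumed cache geometry */
        unsigned int color_mask = colors - 1;   /* mirrors vm_color_mask */
        ppnum_t pn;

        for (pn = 0x1000; pn < 0x1004; pn++)
                printf("phys page 0x%x -> free list %u\n",
                    (unsigned int)pn, pick_color(pn, color_mask));
        return 0;
}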
+
+
+lck_grp_t              vm_page_lck_grp_free;
+lck_grp_t              vm_page_lck_grp_queue;
+lck_grp_t              vm_page_lck_grp_local;
+lck_grp_t              vm_page_lck_grp_purge;
+lck_grp_t              vm_page_lck_grp_alloc;
+lck_grp_t              vm_page_lck_grp_bucket;
+lck_grp_attr_t         vm_page_lck_grp_attr;
+lck_attr_t             vm_page_lck_attr;
+
+
+__private_extern__ void
+vm_page_init_lck_grp(void)
+{
+       /*
+        * initialize the vm_page lock world
+        */
+       lck_grp_attr_setdefault(&vm_page_lck_grp_attr);
+       lck_grp_init(&vm_page_lck_grp_free, "vm_page_free", &vm_page_lck_grp_attr);
+       lck_grp_init(&vm_page_lck_grp_queue, "vm_page_queue", &vm_page_lck_grp_attr);
+       lck_grp_init(&vm_page_lck_grp_local, "vm_page_queue_local", &vm_page_lck_grp_attr);
+       lck_grp_init(&vm_page_lck_grp_purge, "vm_page_purge", &vm_page_lck_grp_attr);
+       lck_grp_init(&vm_page_lck_grp_alloc, "vm_page_alloc", &vm_page_lck_grp_attr);
+       lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr);
+       lck_attr_setdefault(&vm_page_lck_attr);
+       lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
+
+       vm_compressor_init_locks();
+}
+
+void
+vm_page_init_local_q()
+{
+       unsigned int            num_cpus;
+       unsigned int            i;
+       struct vplq             *t_local_q;
+
+       num_cpus = ml_get_max_cpus();
+
+       /*
+        * no point in this for a uni-processor system
+        */
+       if (num_cpus >= 2) {
+               t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq));
+
+               for (i = 0; i < num_cpus; i++) {
+                       struct vpl      *lq;
+
+                       lq = &t_local_q[i].vpl_un.vpl;
+                       VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
+                       queue_init(&lq->vpl_queue);
+                       lq->vpl_count = 0;
+                       lq->vpl_internal_count = 0;
+                       lq->vpl_external_count = 0;
+               }
+               vm_page_local_q_count = num_cpus;
+
+               vm_page_local_q = (struct vplq *)t_local_q;
+       }
+}
+
+
 /*
  *     vm_page_bootstrap:
  *
@@ -316,69 +536,120 @@ vm_page_bootstrap(
         */
 
        m = &vm_page_template;
-       m->object = VM_OBJECT_NULL;             /* reset later */
-       m->offset = (vm_object_offset_t) -1;    /* reset later */
-       m->wire_count = 0;
+       bzero(m, sizeof (*m));
 
        m->pageq.next = NULL;
        m->pageq.prev = NULL;
        m->listq.next = NULL;
        m->listq.prev = NULL;
+       m->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
+
+       m->object = VM_OBJECT_NULL;             /* reset later */
+       m->offset = (vm_object_offset_t) -1;    /* reset later */
 
+       m->wire_count = 0;
+       m->local = FALSE;
        m->inactive = FALSE;
        m->active = FALSE;
+       m->pageout_queue = FALSE;
+       m->speculative = FALSE;
        m->laundry = FALSE;
        m->free = FALSE;
-       m->no_isync = TRUE;
        m->reference = FALSE;
-       m->pageout = FALSE;
-       m->dump_cleaning = FALSE;
-       m->list_req_pending = FALSE;
+       m->gobbled = FALSE;
+       m->private = FALSE;
+       m->throttled = FALSE;
+       m->__unused_pageq_bits = 0;
+
+       m->phys_page = 0;               /* reset later */
 
        m->busy = TRUE;
        m->wanted = FALSE;
        m->tabled = FALSE;
+       m->hashed = FALSE;
        m->fictitious = FALSE;
-       m->private = FALSE;
+       m->pmapped = FALSE;
+       m->wpmapped = FALSE;
+       m->pageout = FALSE;
        m->absent = FALSE;
        m->error = FALSE;
        m->dirty = FALSE;
        m->cleaning = FALSE;
        m->precious = FALSE;
        m->clustered = FALSE;
-       m->lock_supplied = FALSE;
-       m->unusual = FALSE;
+       m->overwriting = FALSE;
        m->restart = FALSE;
-       m->zero_fill = FALSE;
+       m->unusual = FALSE;
        m->encrypted = FALSE;
-
-       m->phys_page = 0;               /* reset later */
-
-       m->page_lock = VM_PROT_NONE;
-       m->unlock_request = VM_PROT_NONE;
-       m->page_error = KERN_SUCCESS;
+       m->encrypted_cleaning = FALSE;
+       m->cs_validated = FALSE;
+       m->cs_tainted = FALSE;
+       m->cs_nx = FALSE;
+       m->no_cache = FALSE;
+       m->reusable = FALSE;
+       m->slid = FALSE;
+       m->xpmapped = FALSE;
+       m->compressor = FALSE;
+       m->written_by_kernel = FALSE;
+       m->__unused_object_bits = 0;
 
        /*
         *      Initialize the page queues.
         */
-
-       mutex_init(&vm_page_queue_free_lock, 0);
-       mutex_init(&vm_page_queue_lock, 0);
-
-       vm_page_queue_free = VM_PAGE_NULL;
-       vm_page_queue_fictitious = VM_PAGE_NULL;
+       vm_page_init_lck_grp();
+       
+       lck_mtx_init_ext(&vm_page_queue_free_lock, &vm_page_queue_free_lock_ext, &vm_page_lck_grp_free, &vm_page_lck_attr);
+       lck_mtx_init_ext(&vm_page_queue_lock, &vm_page_queue_lock_ext, &vm_page_lck_grp_queue, &vm_page_lck_attr);
+       lck_mtx_init_ext(&vm_purgeable_queue_lock, &vm_purgeable_queue_lock_ext, &vm_page_lck_grp_purge, &vm_page_lck_attr);
+    
+       for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
+               int group;
+
+               purgeable_queues[i].token_q_head = 0;
+               purgeable_queues[i].token_q_tail = 0;
+               for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+                       queue_init(&purgeable_queues[i].objq[group]);
+
+               purgeable_queues[i].type = i;
+               purgeable_queues[i].new_pages = 0;
+#if MACH_ASSERT
+               purgeable_queues[i].debug_count_tokens = 0;
+               purgeable_queues[i].debug_count_objects = 0;
+#endif
+       };
+       purgeable_nonvolatile_count = 0;
+       queue_init(&purgeable_nonvolatile_queue);
+    
+       for (i = 0; i < MAX_COLORS; i++ )
+               queue_init(&vm_page_queue_free[i]);
+
+       queue_init(&vm_lopage_queue_free);
        queue_init(&vm_page_queue_active);
        queue_init(&vm_page_queue_inactive);
-       queue_init(&vm_page_queue_zf);
+       queue_init(&vm_page_queue_cleaned);
+       queue_init(&vm_page_queue_throttled);
+       queue_init(&vm_page_queue_anonymous);
+       queue_init(&vm_objects_wired);
 
+       for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
+               queue_init(&vm_page_queue_speculative[i].age_q);
+
+               vm_page_queue_speculative[i].age_ts.tv_sec = 0;
+               vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
+       }
        vm_page_free_wanted = 0;
+       vm_page_free_wanted_privileged = 0;
+       
+       vm_page_set_colors();
+
 
        /*
         *      Steal memory for the map and zone subsystems.
         */
-
-       vm_map_steal_memory();
+       kernel_debug_string_simple("zone_steal_memory");
        zone_steal_memory();
+       kernel_debug_string_simple("vm_map_steal_memory");
+       vm_map_steal_memory();
 
        /*
         *      Allocate (and initialize) the virtual-to-physical
@@ -390,8 +661,6 @@ vm_page_bootstrap(
         *      than the number of physical pages in the system.
         */
 
-       simple_lock_init(&vm_page_bucket_lock, 0);
-       
        if (vm_page_bucket_count == 0) {
                unsigned int npages = pmap_free_pages();
 
@@ -399,6 +668,7 @@ vm_page_bootstrap(
                while (vm_page_bucket_count < npages)
                        vm_page_bucket_count <<= 1;
        }
+       vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
 
        vm_page_hash_mask = vm_page_bucket_count - 1;
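A standalone sketch of the sizing arithmetic above (the free-page count is an assumed example value): the bucket count is grown to a power of two so that masking with vm_page_hash_mask is equivalent to a modulo, and the lock array is sized by a ceiling division so that each spin lock covers BUCKETS_PER_LOCK buckets.

#include <assert.h>
#include <stdio.h>

#define BUCKETS_PER_LOCK        16

int main(void)
{
        unsigned int npages = 100000;   /* assumed pmap_free_pages() result */
        unsigned int buckets = 1;
        unsigned int mask, locks;

        while (buckets < npages)        /* smallest power of two >= npages */
                buckets <<= 1;          /* 131072 for this example */

        mask  = buckets - 1;            /* hash & mask == hash % buckets */
        locks = (buckets + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;

        assert((buckets & mask) == 0);  /* holds only for powers of two */
        printf("buckets=%u mask=0x%x locks=%u\n", buckets, mask, locks);
        return 0;
}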
 
@@ -424,20 +694,61 @@ vm_page_bootstrap(
        if (vm_page_hash_mask & vm_page_bucket_count)
                printf("vm_page_bootstrap: WARNING -- strange page hash\n");
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+       /*
+        * Allocate a decoy set of page buckets, to detect
+        * any stomping there.
+        */
+       vm_page_fake_buckets = (vm_page_bucket_t *)
+               pmap_steal_memory(vm_page_bucket_count *
+                                 sizeof(vm_page_bucket_t));
+       vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+       vm_page_fake_buckets_end =
+               vm_map_round_page((vm_page_fake_buckets_start +
+                                  (vm_page_bucket_count *
+                                   sizeof (vm_page_bucket_t))),
+                                 PAGE_MASK);
+       char *cp;
+       for (cp = (char *)vm_page_fake_buckets_start;
+            cp < (char *)vm_page_fake_buckets_end;
+            cp++) {
+               *cp = 0x5a;
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+       kernel_debug_string_simple("vm_page_buckets");
        vm_page_buckets = (vm_page_bucket_t *)
                pmap_steal_memory(vm_page_bucket_count *
                                  sizeof(vm_page_bucket_t));
 
+       kernel_debug_string_simple("vm_page_bucket_locks");
+       vm_page_bucket_locks = (lck_spin_t *)
+               pmap_steal_memory(vm_page_bucket_lock_count *
+                                 sizeof(lck_spin_t));
+
        for (i = 0; i < vm_page_bucket_count; i++) {
                register vm_page_bucket_t *bucket = &vm_page_buckets[i];
 
-               bucket->pages = VM_PAGE_NULL;
+               bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 #if     MACH_PAGE_HASH_STATS
                bucket->cur_count = 0;
                bucket->hi_count = 0;
 #endif /* MACH_PAGE_HASH_STATS */
        }
 
+       for (i = 0; i < vm_page_bucket_lock_count; i++)
+               lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+
+       lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+       lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+       vm_tag_init();
+
+#if VM_PAGE_BUCKETS_CHECK
+       vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
        /*
         *      Machine-dependent code allocates the resident page table.
         *      It uses vm_page_init to initialize the page frames.
@@ -446,6 +757,7 @@ vm_page_bootstrap(
         *      to get the alignment right.
         */
 
+       kernel_debug_string_simple("pmap_startup");
        pmap_startup(&virtual_space_start, &virtual_space_end);
        virtual_space_start = round_page(virtual_space_start);
        virtual_space_end = trunc_page(virtual_space_end);
@@ -460,11 +772,15 @@ vm_page_bootstrap(
         *      wired, they nonetheless can't be moved. At this moment,
         *      all VM managed pages are "free", courtesy of pmap_startup.
         */
-       vm_page_wire_count = atop_64(max_mem) - vm_page_free_count;     /* initial value */
+       assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
+       vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count;     /* initial value */
+       vm_page_wire_count_initial = vm_page_wire_count;
+       vm_page_pages_initial = vm_page_pages;
 
-       printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
-       vm_page_free_count_minimum = vm_page_free_count;
+       printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
+              vm_page_free_count, vm_page_wire_count);
 
+       kernel_debug_string_simple("vm_page_bootstrap complete");
        simple_lock_init(&vm_paging_lock, 0);
 }
 
@@ -511,7 +827,7 @@ pmap_steal_memory(
        addr = virtual_space_start;
        virtual_space_start += size;
 
-       kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);        /* (TEST/DEBUG) */
+       //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */
 
        /*
         *      Allocate and map physical pages to back new virtual pages.
@@ -520,60 +836,120 @@ pmap_steal_memory(
        for (vaddr = round_page(addr);
             vaddr < addr + size;
             vaddr += PAGE_SIZE) {
-               if (!pmap_next_page(&phys_page))
+
+               if (!pmap_next_page_hi(&phys_page))
                        panic("pmap_steal_memory");
 
                /*
                 *      XXX Logically, these mappings should be wired,
                 *      but some pmap modules barf if they are.
                 */
+#if defined(__LP64__)
+               pmap_pre_expand(kernel_pmap, vaddr);
+#endif
 
                pmap_enter(kernel_pmap, vaddr, phys_page,
-                          VM_PROT_READ|VM_PROT_WRITE, 
+                          VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE,
                                VM_WIMG_USE_DEFAULT, FALSE);
                /*
                 * Account for newly stolen memory
                 */
                vm_page_wire_count++;
-
+               vm_page_stolen_count++;
        }
 
        return (void *) addr;
 }
 
+void vm_page_release_startup(vm_page_t mem);
 void
 pmap_startup(
        vm_offset_t *startp,
        vm_offset_t *endp)
 {
        unsigned int i, npages, pages_initialized, fill, fillval;
-       vm_page_t       pages;
        ppnum_t         phys_page;
        addr64_t        tmpaddr;
 
+
+#if    defined(__LP64__)
+       /*
+        * struct vm_page must be of size 64 due to VM_PAGE_PACK_PTR use
+        */
+       assert(sizeof(struct vm_page) == 64);
+
+       /*
+        * make sure we are aligned on a 64 byte boundary
+        * for VM_PAGE_PACK_PTR (it clips off the low-order
+        * 6 bits of the pointer)
+        */
+       if (virtual_space_start != virtual_space_end)
+               virtual_space_start = round_page(virtual_space_start);
+#endif
+
        /*
         *      We calculate how many page frames we will have
         *      and then allocate the page structures in one chunk.
         */
 
        tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE;    /* Get the amount of memory left */
-       tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start);       /* Account for any slop */
-       npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages)));      /* Figure size of all vm_page_ts, including enough to hold the vm_page_ts */
+       tmpaddr = tmpaddr + (addr64_t)(round_page(virtual_space_start) - virtual_space_start);  /* Account for any slop */
+       npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*vm_pages)));   /* Figure size of all vm_page_ts, including enough to hold the vm_page_ts */
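        /*
         * Worked example of the divisor above (assumed numbers, for
         * illustration only): every managed frame costs PAGE_SIZE bytes of
         * usable memory plus one struct vm_page of metadata, so with 4 KiB
         * pages, 64-byte vm_page structs and 1 GiB left to manage:
         *
         *      npages = 1073741824 / (4096 + 64) = 258111
         *
         * versus 262144 raw 4 KiB frames, i.e. roughly 1.5% of the
         * remaining memory is consumed by the vm_page array itself.
         */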
 
-       pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
+       vm_pages = (vm_page_t) pmap_steal_memory(npages * sizeof *vm_pages);
 
        /*
         *      Initialize the page frames.
         */
-
+       kernel_debug_string_simple("Initialize the page frames");
        for (i = 0, pages_initialized = 0; i < npages; i++) {
                if (!pmap_next_page(&phys_page))
                        break;
+               if (pages_initialized == 0 || phys_page < vm_page_lowest)
+                       vm_page_lowest = phys_page;
 
-               vm_page_init(&pages[i], phys_page);
+               vm_page_init(&vm_pages[i], phys_page, FALSE);
                vm_page_pages++;
                pages_initialized++;
        }
+       vm_pages_count = pages_initialized;
+
+#if    defined(__LP64__)
+
+       if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0])) != &vm_pages[0])
+               panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+
+       if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1])) != &vm_pages[vm_pages_count-1])
+               panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
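The panics above sanity-check the packed-pointer representation. A minimal standalone sketch of the idea, assuming 64-byte aligned elements and some region base (pack_base, pack_ptr and unpack_ptr are illustrative names, not XNU's VM_PAGE_PACK_PTR/VM_PAGE_UNPACK_PTR): dropping the six always-zero low bits lets a pointer within a bounded region be stored in 32 bits.

#include <assert.h>
#include <stdint.h>

#define PACK_SHIFT      6                       /* 64-byte alignment */

static uintptr_t pack_base;                     /* assumed start of the region */

static uint32_t pack_ptr(const void *p)
{
        return (uint32_t)(((uintptr_t)p - pack_base) >> PACK_SHIFT);
}

static void *unpack_ptr(uint32_t packed)
{
        return (void *)(pack_base + ((uintptr_t)packed << PACK_SHIFT));
}

int main(void)
{
        _Alignas(64) static char pages[64 * 4]; /* stand-in for vm_pages[] */
        int i;

        pack_base = (uintptr_t)pages;
        for (i = 0; i < 4; i++) {
                void *p = &pages[64 * i];
                /* same round trip the panics above verify */
                assert(unpack_ptr(pack_ptr(p)) == p);
        }
        return 0;
}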
+       kernel_debug_string_simple("page fill/release");
+       /*
+        * Check if we want to initialize pages to a known value
+        */
+       fill = 0;                                                               /* Assume no fill */
+       if (PE_parse_boot_argn("fill", &fillval, sizeof (fillval))) fill = 1;                   /* Set fill */
+#if    DEBUG
+       /* This slows down booting the DEBUG kernel, particularly on
+        * large memory systems, but is worthwhile in deterministically
+        * trapping uninitialized memory usage.
+        */
+       if (fill == 0) {
+               fill = 1;
+               fillval = 0xDEB8F177;
+       }
+#endif
+       if (fill)
+               kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
+       // -debug code remove
+       if (2 == vm_himemory_mode) {
+               // free low -> high so high is preferred
+               for (i = 1; i <= pages_initialized; i++) {
+                       if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);          /* Fill the page with a known value if requested at boot */
+                       vm_page_release_startup(&vm_pages[i - 1]);
+               }
+       }
+       else
+       // debug code remove-
 
        /*
         * Release pages in reverse order so that physical pages
@@ -581,44 +957,45 @@ pmap_startup(
         * the devices (which must address physical memory) happy if
         * they require several consecutive pages.
         */
-
-/*
- *             Check if we want to initialize pages to a known value
- */
-       
-       fill = 0;                                                                                                       /* Assume no fill */
-       if (PE_parse_boot_arg("fill", &fillval)) fill = 1;                      /* Set fill */
-       
        for (i = pages_initialized; i > 0; i--) {
-               if(fill) fillPage(pages[i - 1].phys_page, fillval);             /* Fill the page with a know value if requested at boot */                      
-               vm_page_release(&pages[i - 1]);
+               if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);          /* Fill the page with a known value if requested at boot */
+               vm_page_release_startup(&vm_pages[i - 1]);
        }
 
+       VM_CHECK_MEMORYSTATUS;
+       
 #if 0
        {
                vm_page_t xx, xxo, xxl;
-               int j, k, l;
+               int i, j, k, l;
        
                j = 0;                                                                                                  /* (BRINGUP) */
                xxl = 0;
                
-               for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) {       /* (BRINGUP) */
-                       j++;                                                                                            /* (BRINGUP) */
-                       if(j > vm_page_free_count) {                                            /* (BRINGUP) */
-                               panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
-                       }
-                       
-                       l = vm_page_free_count - j;                                                     /* (BRINGUP) */
-                       k = 0;                                                                                          /* (BRINGUP) */
-                       
-                       if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
-
-                       for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) { /* (BRINGUP) */
-                               k++;
-                               if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
-                               if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {     /* (BRINGUP) */
-                                       panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
+               for( i = 0; i < vm_colors; i++ ) {
+                       queue_iterate(&vm_page_queue_free[i],
+                                     xx,
+                                     vm_page_t,
+                                     pageq) {  /* BRINGUP */
+                               j++;                                                                                            /* (BRINGUP) */
+                               if(j > vm_page_free_count) {                                            /* (BRINGUP) */
+                                       panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
+                               }
+                               
+                               l = vm_page_free_count - j;                                                     /* (BRINGUP) */
+                               k = 0;                                                                                          /* (BRINGUP) */
+                               
+                               if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
+
+                               for(xxo = xx->pageq.next; xxo != &vm_page_queue_free[i]; xxo = xxo->pageq.next) {       /* (BRINGUP) */
+                                       k++;
+                                       if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
+                                       if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {     /* (BRINGUP) */
+                                               panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
+                                       }
                                }
+
+                               xxl = xx;
                        }
                }
                
@@ -634,7 +1011,7 @@ pmap_startup(
         *      because pmap_steal_memory has been using it.
         */
 
-       virtual_space_start = round_page_32(virtual_space_start);
+       virtual_space_start = round_page(virtual_space_start);
 
        *startp = virtual_space_start;
        *endp = virtual_space_end;
@@ -650,6 +1027,7 @@ pmap_startup(
 void
 vm_page_module_init(void)
 {
+       uint64_t vm_page_zone_pages, vm_page_zone_data_size;
        vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
                             0, PAGE_SIZE, "vm pages");
 
@@ -657,18 +1035,23 @@ vm_page_module_init(void)
        zone_debug_disable(vm_page_zone);
 #endif /* ZONE_DEBUG */
 
+       zone_change(vm_page_zone, Z_CALLERACCT, FALSE);
        zone_change(vm_page_zone, Z_EXPAND, FALSE);
        zone_change(vm_page_zone, Z_EXHAUST, TRUE);
        zone_change(vm_page_zone, Z_FOREIGN, TRUE);
-
-        /*
-         * Adjust zone statistics to account for the real pages allocated
-         * in vm_page_create(). [Q: is this really what we want?]
-         */
-        vm_page_zone->count += vm_page_pages;
-        vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
-
-       mutex_init(&vm_page_alloc_lock, 0);
+       zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
+       /*
+        * Adjust zone statistics to account for the real pages allocated
+        * in vm_page_create(). [Q: is this really what we want?]
+        */
+       vm_page_zone->count += vm_page_pages;
+       vm_page_zone->sum_count += vm_page_pages;
+       vm_page_zone_data_size = vm_page_pages * vm_page_zone->elem_size;
+       vm_page_zone->cur_size += vm_page_zone_data_size;
+       vm_page_zone_pages = ((round_page(vm_page_zone_data_size)) / PAGE_SIZE);
+       OSAddAtomic64(vm_page_zone_pages, &(vm_page_zone->page_count));
+       /* since zone accounts for these, take them out of stolen */
+       VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
 }
 
 /*
@@ -691,11 +1074,13 @@ vm_page_create(
        for (phys_page = start;
             phys_page < end;
             phys_page++) {
-               while ((m = (vm_page_t) vm_page_grab_fictitious())
+               while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page))
                        == VM_PAGE_NULL)
                        vm_page_more_fictitious();
 
-               vm_page_init(m, phys_page);
+               m->fictitious = FALSE;
+               pmap_clear_noencrypt(phys_page);
+
                vm_page_pages++;
                vm_page_release(m);
        }
@@ -709,9 +1094,10 @@ vm_page_create(
  *     NOTE:   The bucket count must be a power of 2
  */
 #define vm_page_hash(object, offset) (\
-       ( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
+       ( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
         & vm_page_hash_mask)
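For orientation, a standalone sketch of how this hash feeds the bucket table that vm_page_insert()/vm_page_lookup() walk: the object pointer is scrambled by a boot-time constant, mixed with the page's index within the object, and masked to the power-of-two table size; each group of BUCKETS_PER_LOCK buckets then shares one spin lock. Everything below (NBUCKETS, struct page, page_insert/page_lookup, the 0x9e3779b9 constant) is illustrative, not XNU's code.

#include <stdint.h>
#include <stdio.h>

#define NBUCKETS        8               /* power of two, so the mask is NBUCKETS - 1 */

struct page {
        struct page     *next;          /* hash chain; XNU stores this as a packed pointer */
        const void      *object;
        uint64_t        offset;
};

static struct page     *buckets[NBUCKETS];
static const uint32_t   bucket_hash = 0x9e3779b9u;     /* stands in for the boot-time vm_page_bucket_hash */

static unsigned int page_hash(const void *object, uint64_t offset)
{
        /* offset >> 12 assumes 4 KiB pages, mirroring atop_64(offset) */
        return (unsigned int)(((uintptr_t)object * bucket_hash) +
            ((uint32_t)(offset >> 12) ^ bucket_hash)) & (NBUCKETS - 1);
}

static void page_insert(struct page *m)
{
        unsigned int hash_id = page_hash(m->object, m->offset);

        /* the kernel takes vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK] here */
        m->next = buckets[hash_id];
        buckets[hash_id] = m;
}

static struct page *page_lookup(const void *object, uint64_t offset)
{
        struct page *m = buckets[page_hash(object, offset)];

        while (m != NULL && (m->object != object || m->offset != offset))
                m = m->next;
        return m;
}

int main(void)
{
        static int obj;                         /* stand-in for a VM object */
        struct page p = { NULL, &obj, 0x4000 };

        page_insert(&p);
        printf("lookup hit: %d\n", page_lookup(&obj, 0x4000) == &p);
        return 0;
}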
 
+
 /*
  *     vm_page_insert:         [ internal use only ]
  *
@@ -720,62 +1106,131 @@ vm_page_create(
  *
  *     The object must be locked.
  */
-
 void
 vm_page_insert(
-       register vm_page_t              mem,
-       register vm_object_t            object,
-       register vm_object_offset_t     offset)
+       vm_page_t               mem,
+       vm_object_t             object,
+       vm_object_offset_t      offset)
+{
+       vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
+}
+
+void
+vm_page_insert_wired(
+       vm_page_t               mem,
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_tag_t                tag)
+{
+       vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
+}
+
+void
+vm_page_insert_internal(
+       vm_page_t               mem,
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_tag_t                tag,
+       boolean_t               queues_lock_held,
+       boolean_t               insert_in_hash,
+       boolean_t               batch_pmap_op,
+        boolean_t               batch_accounting,
+       uint64_t                *delayed_ledger_update)
 {
-       register vm_page_bucket_t *bucket;
+       vm_page_bucket_t        *bucket;
+       lck_spin_t              *bucket_lock;
+       int                     hash_id;
+       task_t                  owner;
 
         XPR(XPR_VM_PAGE,
                 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
-                (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
-
+                object, offset, mem, 0,0);
+#if 0
+       /*
+        * we may not hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(mem);
-#if DEBUG
-       _mutex_assert(&object->Lock, MA_OWNED);
+#endif
 
-       if (mem->tabled || mem->object != VM_OBJECT_NULL)
-               panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
-                     "already in (obj=%p,off=0x%llx)",
-                     mem, object, offset, mem->object, mem->offset);
+       assert(page_aligned(offset));
+
+       assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE));
+
+       /* the vm_submap_object is only a placeholder for submaps */
+       assert(object != vm_submap_object);
+
+       vm_object_lock_assert_exclusive(object);
+#if DEBUG
+       lck_mtx_assert(&vm_page_queue_lock,
+                      queues_lock_held ? LCK_MTX_ASSERT_OWNED
+                                       : LCK_MTX_ASSERT_NOTOWNED);
+#endif /* DEBUG */
+
+       if (insert_in_hash == TRUE) {
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
+               if (mem->tabled || mem->object != VM_OBJECT_NULL)
+                       panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
+                             "already in (obj=%p,off=0x%llx)",
+                             mem, object, offset, mem->object, mem->offset);
 #endif
-       assert(!object->internal || offset < object->size);
+               assert(!object->internal || offset < object->vo_size);
 
-       /* only insert "pageout" pages into "pageout" objects,
-        * and normal pages into normal objects */
-       assert(object->pageout == mem->pageout);
+               /* only insert "pageout" pages into "pageout" objects,
+                * and normal pages into normal objects */
+#if 00
+               /*
+                * For some reason, this assertion gets tripped
+                * but it's mostly harmless, so let's disable it
+                * for now.
+                */
+               assert(object->pageout == mem->pageout);
+#endif /* 00 */
 
-       assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
+               assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
+               
+               /*
+                *      Record the object/offset pair in this page
+                */
 
-       /*
-        *      Record the object/offset pair in this page
-        */
+               mem->object = object;
+               mem->offset = offset;
 
-       mem->object = object;
-       mem->offset = offset;
+               /*
+                *      Insert it into the object/offset hash table
+                */
+               hash_id = vm_page_hash(object, offset);
+               bucket = &vm_page_buckets[hash_id];
+               bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
+       
+               lck_spin_lock(bucket_lock);
 
-       /*
-        *      Insert it into the object_object/offset hash table
-        */
+               mem->next_m = bucket->page_list;
+               bucket->page_list = VM_PAGE_PACK_PTR(mem);
+               assert(mem == VM_PAGE_UNPACK_PTR(bucket->page_list));
 
-       bucket = &vm_page_buckets[vm_page_hash(object, offset)];
-       simple_lock(&vm_page_bucket_lock);
-       mem->next = bucket->pages;
-       bucket->pages = mem;
 #if     MACH_PAGE_HASH_STATS
-       if (++bucket->cur_count > bucket->hi_count)
-               bucket->hi_count = bucket->cur_count;
+               if (++bucket->cur_count > bucket->hi_count)
+                       bucket->hi_count = bucket->cur_count;
 #endif /* MACH_PAGE_HASH_STATS */
-       simple_unlock(&vm_page_bucket_lock);
+               mem->hashed = TRUE;
+               lck_spin_unlock(bucket_lock);
+       }
 
+       {       
+               unsigned int    cache_attr;
+
+               cache_attr = object->wimg_bits & VM_WIMG_MASK;
+
+               if (cache_attr != VM_WIMG_USE_DEFAULT) {
+                       PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
+               }
+       }
        /*
         *      Now link into the object's list of backed pages.
         */
-
-       VM_PAGE_INSERT(mem, object);
+       queue_enter(&object->memq, mem, vm_page_t, listq);
+       object->memq_hint = mem;
        mem->tabled = TRUE;
 
        /*
@@ -783,13 +1238,118 @@ vm_page_insert(
         */
 
        object->resident_page_count++;
+       if (VM_PAGE_WIRED(mem)) {
+           if (!mem->private && !mem->fictitious) 
+           {
+               if (!object->wired_page_count)
+               {
+                   assert(VM_KERN_MEMORY_NONE != tag);
+                   object->wire_tag = tag;
+                   VM_OBJECT_WIRED(object);
+               }
+           }
+           object->wired_page_count++;
+       }
+       assert(object->resident_page_count >= object->wired_page_count);
 
-       if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
-           object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
-               vm_page_lock_queues();
-               vm_page_purgeable_count++;
-               vm_page_unlock_queues();
+        if (batch_accounting == FALSE) {
+               if (object->internal) {
+                       OSAddAtomic(1, &vm_page_internal_count);
+               } else {
+                       OSAddAtomic(1, &vm_page_external_count);
+               }
+       }
+
+       /*
+        * It wouldn't make sense to insert a "reusable" page in
+        * an object (the page would have been marked "reusable" only
+        * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
+        * in the object at that time).
+        * But a page could be inserted in a "all_reusable" object, if
+        * something faults it in (a vm_read() from another task or a
+        * "use-after-free" issue in user space, for example).  It can
+        * also happen if we're relocating a page from that object to
+        * a different physical page during a physically-contiguous
+        * allocation.
+        */
+       assert(!mem->reusable);
+       if (mem->object->all_reusable) {
+               OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
+       }
+
+       if (object->purgable == VM_PURGABLE_DENY) {
+               owner = TASK_NULL;
+       } else {
+               owner = object->vo_purgeable_owner;
+       }
+       if (owner &&
+           (object->purgable == VM_PURGABLE_NONVOLATILE ||
+            VM_PAGE_WIRED(mem))) {
+
+               if (delayed_ledger_update)
+                       *delayed_ledger_update += PAGE_SIZE;
+               else {
+                       /* more non-volatile bytes */
+                       ledger_credit(owner->ledger,
+                                     task_ledgers.purgeable_nonvolatile,
+                                     PAGE_SIZE);
+                       /* more footprint */
+                       ledger_credit(owner->ledger,
+                                     task_ledgers.phys_footprint,
+                                     PAGE_SIZE);
+               }
+
+       } else if (owner &&
+                  (object->purgable == VM_PURGABLE_VOLATILE ||
+                   object->purgable == VM_PURGABLE_EMPTY)) {
+               assert(! VM_PAGE_WIRED(mem));
+               /* more volatile bytes */
+               ledger_credit(owner->ledger,
+                             task_ledgers.purgeable_volatile,
+                             PAGE_SIZE);
+       }
+
+       if (object->purgable == VM_PURGABLE_VOLATILE) {
+               if (VM_PAGE_WIRED(mem)) {
+                       OSAddAtomic(+1, &vm_page_purgeable_wired_count);
+               } else {
+                       OSAddAtomic(+1, &vm_page_purgeable_count);
+               }
+       } else if (object->purgable == VM_PURGABLE_EMPTY &&
+                  mem->throttled) {
+               /*
+                * This page belongs to a purged VM object but hasn't
+                * been purged (because it was "busy").
+                * It's in the "throttled" queue and hence not
+                * visible to vm_pageout_scan().  Move it to a pageable
+                * queue, so that it can eventually be reclaimed, instead
+                * of lingering in the "empty" object.
+                */
+               if (queues_lock_held == FALSE)
+                       vm_page_lockspin_queues();
+               vm_page_deactivate(mem);
+               if (queues_lock_held == FALSE)
+                       vm_page_unlock_queues();
        }
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+       if (vm_object_tracking_inited &&
+           object->internal &&
+           object->resident_page_count == 0 &&
+           object->pager == NULL &&
+           object->shadow != NULL &&
+           object->shadow->copy == object) {
+               void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+               int numsaved = 0;
+
+               numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+               btlog_add_entry(vm_object_tracking_btlog,
+                               object,
+                               VM_OBJECT_TRACKING_OP_MODIFIED,
+                               bt,
+                               numsaved);
+       }
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
 }
 
 /*
@@ -798,26 +1358,33 @@ vm_page_insert(
  *     Exactly like vm_page_insert, except that we first
  *     remove any existing page at the given offset in object.
  *
- *     The object and page queues must be locked.
+ *     The object must be locked.
  */
-
 void
 vm_page_replace(
        register vm_page_t              mem,
        register vm_object_t            object,
        register vm_object_offset_t     offset)
 {
-       register vm_page_bucket_t *bucket;
+       vm_page_bucket_t *bucket;
+       vm_page_t        found_m = VM_PAGE_NULL;
+       lck_spin_t      *bucket_lock;
+       int             hash_id;
 
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(mem);
-#if DEBUG
-       _mutex_assert(&object->Lock, MA_OWNED);
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
-
+#endif
+       vm_object_lock_assert_exclusive(object);
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
        if (mem->tabled || mem->object != VM_OBJECT_NULL)
                panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
                      "already in (obj=%p,off=0x%llx)",
                      mem, object, offset, mem->object, mem->offset);
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
 #endif
        /*
         *      Record the object/offset pair in this page
@@ -831,66 +1398,51 @@ vm_page_replace(
         *      replacing any page that might have been there.
         */
 
-       bucket = &vm_page_buckets[vm_page_hash(object, offset)];
-       simple_lock(&vm_page_bucket_lock);
-       if (bucket->pages) {
-               vm_page_t *mp = &bucket->pages;
-               register vm_page_t m = *mp;
+       hash_id = vm_page_hash(object, offset);
+       bucket = &vm_page_buckets[hash_id];
+       bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
+
+       lck_spin_lock(bucket_lock);
+
+       if (bucket->page_list) {
+               vm_page_packed_t *mp = &bucket->page_list;
+               vm_page_t m = VM_PAGE_UNPACK_PTR(*mp);
+
                do {
                        if (m->object == object && m->offset == offset) {
                                /*
-                                * Remove page from bucket and from object,
-                                * and return it to the free list.
-                                */
-                               *mp = m->next;
-                               VM_PAGE_REMOVE(m);
-                               m->tabled = FALSE;
-                               m->object = VM_OBJECT_NULL;
-                               m->offset = (vm_object_offset_t) -1;
-                               object->resident_page_count--;
-
-                               if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
-                                   object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
-                                       assert(vm_page_purgeable_count > 0);
-                                       vm_page_purgeable_count--;
-                               }
-                                       
-                               /*
-                                * Return page to the free list.
-                                * Note the page is not tabled now, so this
-                                * won't self-deadlock on the bucket lock.
+                                * Remove old page from hash list
                                 */
+                               *mp = m->next_m;
+                               m->hashed = FALSE;
 
-                               vm_page_free(m);
+                               found_m = m;
                                break;
                        }
-                       mp = &m->next;
-               } while ((m = *mp));
-               mem->next = bucket->pages;
+                       mp = &m->next_m;
+               } while ((m = VM_PAGE_UNPACK_PTR(*mp)));
+
+               mem->next_m = bucket->page_list;
        } else {
-               mem->next = VM_PAGE_NULL;
+               mem->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
        }
-       bucket->pages = mem;
-       simple_unlock(&vm_page_bucket_lock);
-
        /*
-        *      Now link into the object's list of backed pages.
+        * insert new page at head of hash list
         */
+       bucket->page_list = VM_PAGE_PACK_PTR(mem);
+       mem->hashed = TRUE;
 
-       VM_PAGE_INSERT(mem, object);
-       mem->tabled = TRUE;
-
-       /*
-        *      And show that the object has one more resident
-        *      page.
-        */
+       lck_spin_unlock(bucket_lock);
 
-       object->resident_page_count++;
-
-       if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
-           object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
-               vm_page_purgeable_count++;
+       if (found_m) {
+               /*
+                * there was already a page at the specified
+                * offset for this object... remove it from
+                * the object and free it back to the free list
+                */
+               vm_page_free_unlocked(found_m, FALSE);
        }
+       vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
 }
 
 /*
@@ -899,77 +1451,164 @@ vm_page_replace(
  *     Removes the given mem entry from the object/offset-page
  *     table and the object page list.
  *
- *     The object and page queues must be locked.
+ *     The object must be locked.
  */
 
 void
 vm_page_remove(
-       register vm_page_t      mem)
+       vm_page_t       mem,
+       boolean_t       remove_from_hash)
 {
-       register vm_page_bucket_t       *bucket;
-       register vm_page_t      this;
+       vm_page_bucket_t *bucket;
+       vm_page_t       this;
+       lck_spin_t      *bucket_lock;
+       int             hash_id;
+       task_t          owner;
 
         XPR(XPR_VM_PAGE,
                 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
-                (integer_t)mem->object, (integer_t)mem->offset, 
-               (integer_t)mem, 0,0);
-#if DEBUG
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
-       _mutex_assert(&mem->object->Lock, MA_OWNED);
-#endif
+                mem->object, mem->offset, 
+               mem, 0,0);
+
+       vm_object_lock_assert_exclusive(mem->object);
        assert(mem->tabled);
        assert(!mem->cleaning);
-       VM_PAGE_CHECK(mem);
-
-
+       assert(!mem->laundry);
+#if 0
        /*
-        *      Remove from the object_object/offset hash table
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
         */
+       VM_PAGE_CHECK(mem);
+#endif
+       if (remove_from_hash == TRUE) {
+               /*
+                *      Remove from the object/offset hash table
+                */
+               hash_id = vm_page_hash(mem->object, mem->offset);
+               bucket = &vm_page_buckets[hash_id];
+               bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 
-       bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
-       simple_lock(&vm_page_bucket_lock);
-       if ((this = bucket->pages) == mem) {
-               /* optimize for common case */
+               lck_spin_lock(bucket_lock);
 
-               bucket->pages = mem->next;
-       } else {
-               register vm_page_t      *prev;
+               if ((this = VM_PAGE_UNPACK_PTR(bucket->page_list)) == mem) {
+                       /* optimize for common case */
 
-               for (prev = &this->next;
-                    (this = *prev) != mem;
-                    prev = &this->next)
-                       continue;
-               *prev = this->next;
-       }
+                       bucket->page_list = mem->next_m;
+               } else {
+                       vm_page_packed_t        *prev;
+
+                       for (prev = &this->next_m;
+                            (this = VM_PAGE_UNPACK_PTR(*prev)) != mem;
+                            prev = &this->next_m)
+                               continue;
+                       *prev = this->next_m;
+               }
 #if     MACH_PAGE_HASH_STATS
-       bucket->cur_count--;
+               bucket->cur_count--;
 #endif /* MACH_PAGE_HASH_STATS */
-       simple_unlock(&vm_page_bucket_lock);
-
+               mem->hashed = FALSE;
+               lck_spin_unlock(bucket_lock);
+       }
        /*
         *      Now remove from the object's list of backed pages.
         */
 
-       VM_PAGE_REMOVE(mem);
+       vm_page_remove_internal(mem);
 
        /*
         *      And show that the object has one fewer resident
         *      page.
         */
 
+       assert(mem->object->resident_page_count > 0);
        mem->object->resident_page_count--;
 
-       if (mem->object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
-           mem->object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
-               assert(vm_page_purgeable_count > 0);
-               vm_page_purgeable_count--;
+       if (mem->object->internal) {
+#if DEBUG
+               assert(vm_page_internal_count);
+#endif /* DEBUG */
+
+               OSAddAtomic(-1, &vm_page_internal_count);
+       } else {
+               assert(vm_page_external_count);
+               OSAddAtomic(-1, &vm_page_external_count);
+
+               if (mem->xpmapped) {
+                       assert(vm_page_xpmapped_external_count);
+                       OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+               }
+       }
+       if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) {
+               if (mem->object->resident_page_count == 0)
+                       vm_object_cache_remove(mem->object);
+       }
+
+       if (VM_PAGE_WIRED(mem)) {
+               assert(mem->object->wired_page_count > 0);
+               mem->object->wired_page_count--;
+               if (!mem->object->wired_page_count) {
+                   VM_OBJECT_UNWIRED(mem->object);
+               }
+       }
+       assert(mem->object->resident_page_count >=
+              mem->object->wired_page_count);
+       if (mem->reusable) {
+               assert(mem->object->reusable_page_count > 0);
+               mem->object->reusable_page_count--;
+               assert(mem->object->reusable_page_count <=
+                      mem->object->resident_page_count);
+               mem->reusable = FALSE;
+               OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
+               vm_page_stats_reusable.reused_remove++;
+       } else if (mem->object->all_reusable) {
+               OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
+               vm_page_stats_reusable.reused_remove++;
+       }
+
+       if (mem->object->purgable == VM_PURGABLE_DENY) {
+               owner = TASK_NULL;
+       } else {
+               owner = mem->object->vo_purgeable_owner;
+       }
+       if (owner &&
+           (mem->object->purgable == VM_PURGABLE_NONVOLATILE ||
+            VM_PAGE_WIRED(mem))) {
+               /* less non-volatile bytes */
+               ledger_debit(owner->ledger,
+                            task_ledgers.purgeable_nonvolatile,
+                            PAGE_SIZE);
+               /* less footprint */
+               ledger_debit(owner->ledger,
+                            task_ledgers.phys_footprint,
+                            PAGE_SIZE);
+       } else if (owner &&
+                  (mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                   mem->object->purgable == VM_PURGABLE_EMPTY)) {
+               assert(! VM_PAGE_WIRED(mem));
+               /* less volatile bytes */
+               ledger_debit(owner->ledger,
+                            task_ledgers.purgeable_volatile,
+                            PAGE_SIZE);
        }
+       if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+               if (VM_PAGE_WIRED(mem)) {
+                       assert(vm_page_purgeable_wired_count > 0);
+                       OSAddAtomic(-1, &vm_page_purgeable_wired_count);
+               } else {
+                       assert(vm_page_purgeable_count > 0);
+                       OSAddAtomic(-1, &vm_page_purgeable_count);
+               }
+       }
+       if (mem->object->set_cache_attr == TRUE)
+               pmap_set_cache_attributes(mem->phys_page, 0);
 
        mem->tabled = FALSE;
        mem->object = VM_OBJECT_NULL;
        mem->offset = (vm_object_offset_t) -1;
 }
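
vm_page_remove() above now walks the bucket chain through packed next_m links (VM_PAGE_PACK_PTR / VM_PAGE_UNPACK_PTR) rather than raw pointers. The sketch below shows one common way such 32-bit packing can work when all the structures live in one large contiguous array: store an element index instead of a full 64-bit address. This is only an illustrative analogue with hypothetical names, not the actual xnu encoding.

/*
 * Illustrative analogue of 32-bit packed page pointers: when all page
 * structures live in one contiguous array, a link can be stored as an
 * element index rather than a raw address.  Hypothetical names; not the
 * actual VM_PAGE_PACK_PTR/VM_PAGE_UNPACK_PTR encoding.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
        uint32_t next_packed;   /* packed link: index + 1, 0 means "no page" */
        int      payload;
};

static struct page *page_array;         /* base of the page structure array */

static uint32_t
pack_ptr(struct page *p)
{
        return p ? (uint32_t)(p - page_array) + 1 : 0;
}

static struct page *
unpack_ptr(uint32_t packed)
{
        return packed ? &page_array[packed - 1] : NULL;
}

int
main(void)
{
        page_array = calloc(4, sizeof(struct page));

        /* link page 2 after page 0 using the packed representation */
        page_array[0].next_packed = pack_ptr(&page_array[2]);

        struct page *next = unpack_ptr(page_array[0].next_packed);
        assert(next == &page_array[2]);
        printf("page 0 links to index %ld via a 32-bit field\n",
               (long)(next - page_array));

        free(page_array);
        return 0;
}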
 
+
 /*
  *     vm_page_lookup:
  *
@@ -979,110 +1618,208 @@ vm_page_remove(
  *     The object must be locked.  No side effects.
  */
 
-unsigned long vm_page_lookup_hint = 0;
-unsigned long vm_page_lookup_hint_next = 0;
-unsigned long vm_page_lookup_hint_prev = 0;
-unsigned long vm_page_lookup_hint_miss = 0;
+#define        VM_PAGE_HASH_LOOKUP_THRESHOLD   10
+
+#if DEBUG_VM_PAGE_LOOKUP
+
+struct {
+       uint64_t        vpl_total;
+       uint64_t        vpl_empty_obj;
+       uint64_t        vpl_bucket_NULL;
+       uint64_t        vpl_hit_hint;
+       uint64_t        vpl_hit_hint_next;
+       uint64_t        vpl_hit_hint_prev;
+       uint64_t        vpl_fast;
+       uint64_t        vpl_slow;
+       uint64_t        vpl_hit;
+       uint64_t        vpl_miss;
+
+       uint64_t        vpl_fast_elapsed;
+       uint64_t        vpl_slow_elapsed;
+} vm_page_lookup_stats __attribute__((aligned(8)));
+
+#endif
+
+#define        KDP_VM_PAGE_WALK_MAX    1000
+
+vm_page_t
+kdp_vm_page_lookup(
+       vm_object_t             object,
+       vm_object_offset_t      offset)
+{
+       vm_page_t cur_page;
+       int num_traversed = 0;
+
+       if (not_in_kdp) {
+               panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
+       }
+
+       queue_iterate(&object->memq, cur_page, vm_page_t, listq) {
+               if (cur_page->offset == offset) {
+                       return cur_page;
+               }
+               num_traversed++;
+
+               if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
+                       return VM_PAGE_NULL;
+               }
+       }
+
+       return VM_PAGE_NULL;
+}
 
 vm_page_t
 vm_page_lookup(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset)
+       vm_object_t             object,
+       vm_object_offset_t      offset)
 {
-       register vm_page_t      mem;
-       register vm_page_bucket_t *bucket;
-       queue_entry_t           qe;
-#if 0
-       _mutex_assert(&object->Lock, MA_OWNED);
+       vm_page_t       mem;
+       vm_page_bucket_t *bucket;
+       queue_entry_t   qe;
+       lck_spin_t      *bucket_lock = NULL;
+       int             hash_id;
+#if DEBUG_VM_PAGE_LOOKUP
+       uint64_t        start, elapsed;
+
+       OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
+#endif
+       vm_object_lock_assert_held(object);
+
+       if (object->resident_page_count == 0) {
+#if DEBUG_VM_PAGE_LOOKUP
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
 #endif
+               return (VM_PAGE_NULL);
+       }
 
        mem = object->memq_hint;
+
        if (mem != VM_PAGE_NULL) {
                assert(mem->object == object);
+
                if (mem->offset == offset) {
-                       vm_page_lookup_hint++;
-                       return mem;
+#if DEBUG_VM_PAGE_LOOKUP
+                       OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
+#endif
+                       return (mem);
                }
                qe = queue_next(&mem->listq);
+
                if (! queue_end(&object->memq, qe)) {
                        vm_page_t       next_page;
 
                        next_page = (vm_page_t) qe;
                        assert(next_page->object == object);
+
                        if (next_page->offset == offset) {
-                               vm_page_lookup_hint_next++;
                                object->memq_hint = next_page; /* new hint */
-                               return next_page;
+#if DEBUG_VM_PAGE_LOOKUP
+                               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
+#endif
+                               return (next_page);
                        }
                }
                qe = queue_prev(&mem->listq);
+
                if (! queue_end(&object->memq, qe)) {
                        vm_page_t prev_page;
 
                        prev_page = (vm_page_t) qe;
                        assert(prev_page->object == object);
+
                        if (prev_page->offset == offset) {
-                               vm_page_lookup_hint_prev++;
                                object->memq_hint = prev_page; /* new hint */
-                               return prev_page;
+#if DEBUG_VM_PAGE_LOOKUP
+                               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
+#endif
+                               return (prev_page);
                        }
                }
        }
+       /*
+        * Search the hash table for this object/offset pair
+        */
+       hash_id = vm_page_hash(object, offset);
+       bucket = &vm_page_buckets[hash_id];
 
        /*
-        *      Search the hash table for this object/offset pair
+        * since we hold the object lock, we are guaranteed that no
+        * new pages can be inserted into this object... this in turn
+        * guarantees that the page we're looking for can't exist
+        * if the bucket it hashes to is currently NULL even when looked
+        * at outside the scope of the hash bucket lock... this is a 
+        * really cheap optimization to avoid taking the lock
         */
+       if (!bucket->page_list) {
+#if DEBUG_VM_PAGE_LOOKUP
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
+#endif
+               return (VM_PAGE_NULL);
+       }
 
-       bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+#if DEBUG_VM_PAGE_LOOKUP
+       start = mach_absolute_time();
+#endif
+       if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
+               /*
+                * on average, it's roughly 3 times faster to run a short memq list
+                * than to take the spin lock and go through the hash list
+                */
+               mem = (vm_page_t)queue_first(&object->memq);
 
-       simple_lock(&vm_page_bucket_lock);
-       for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
-               VM_PAGE_CHECK(mem);
-               if ((mem->object == object) && (mem->offset == offset))
-                       break;
-       }
-       simple_unlock(&vm_page_bucket_lock);
+               while (!queue_end(&object->memq, (queue_entry_t)mem)) {
 
-       if (mem != VM_PAGE_NULL) {
-               if (object->memq_hint != VM_PAGE_NULL) {
-                       vm_page_lookup_hint_miss++;
-               }
-               assert(mem->object == object);
-               object->memq_hint = mem;
-       }
+                       if (mem->offset == offset)
+                               break;
 
-       return(mem);
-}
+                       mem = (vm_page_t)queue_next(&mem->listq);
+               }
+               if (queue_end(&object->memq, (queue_entry_t)mem))
+                       mem = NULL;
+       } else {
 
+               bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 
-vm_page_t
-vm_page_lookup_nohint(
-       vm_object_t             object,
-       vm_object_offset_t      offset)
-{
-       register vm_page_t      mem;
-       register vm_page_bucket_t *bucket;
+               lck_spin_lock(bucket_lock);
 
+               for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = VM_PAGE_UNPACK_PTR(mem->next_m)) {
 #if 0
-       _mutex_assert(&object->Lock, MA_OWNED);
+                       /*
+                        * we don't hold the page queue lock
+                        * so this check isn't safe to make
+                        */
+                       VM_PAGE_CHECK(mem);
 #endif
-       /*
-        *      Search the hash table for this object/offset pair
-        */
+                       if ((mem->object == object) && (mem->offset == offset))
+                               break;
+               }
+               lck_spin_unlock(bucket_lock);
+       }
 
-       bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+#if DEBUG_VM_PAGE_LOOKUP
+       elapsed = mach_absolute_time() - start;
 
-       simple_lock(&vm_page_bucket_lock);
-       for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
-               VM_PAGE_CHECK(mem);
-               if ((mem->object == object) && (mem->offset == offset))
-                       break;
+       if (bucket_lock) {
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
+               OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
+       } else {
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
+               OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
        }
-       simple_unlock(&vm_page_bucket_lock);
+       if (mem != VM_PAGE_NULL)
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
+       else
+               OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
+#endif
+       if (mem != VM_PAGE_NULL) {
+               assert(mem->object == object);
 
-       return(mem);
+               object->memq_hint = mem;
+       }
+       return (mem);
 }
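
The rewritten vm_page_lookup() above takes the bucket spin lock only for large objects; when the object holds at most VM_PAGE_HASH_LOOKUP_THRESHOLD resident pages it simply walks the object's own memq, which the in-line comment estimates is roughly three times faster. A user-space sketch of the same size-based decision follows; the names (obj_lookup, LOOKUP_THRESHOLD, the array standing in for the memq) are hypothetical.

/*
 * Sketch of the size-based lookup decision above: walk a short per-object
 * list directly, and only fall back to a (conceptually locked) hash table
 * for larger objects.  All names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define LOOKUP_THRESHOLD 10
#define MAX_PAGES        64

struct object {
        int           resident_count;
        unsigned long offsets[MAX_PAGES];       /* stand-in for the object's memq */
};

/* stand-in for the hash path; the real code takes a bucket spin lock here */
static int
hash_lookup(const struct object *obj, unsigned long offset)
{
        for (int i = 0; i < obj->resident_count; i++)
                if (obj->offsets[i] == offset)
                        return i;
        return -1;
}

static int
obj_lookup(const struct object *obj, unsigned long offset)
{
        if (obj->resident_count == 0)
                return -1;                      /* empty object: answer without searching */

        if (obj->resident_count <= LOOKUP_THRESHOLD) {
                /* short resident list: linear walk, no bucket lock needed */
                for (int i = 0; i < obj->resident_count; i++)
                        if (obj->offsets[i] == offset)
                                return i;
                return -1;
        }
        return hash_lookup(obj, offset);        /* large object: use the hash table */
}

int
main(void)
{
        struct object obj;

        memset(&obj, 0, sizeof(obj));
        for (int i = 0; i < 5; i++)
                obj.offsets[obj.resident_count++] = (unsigned long)i * 0x1000;

        printf("offset 0x3000 found at slot %d\n", obj_lookup(&obj, 0x3000));
        return 0;
}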
 
+
 /*
  *     vm_page_rename:
  *
@@ -1095,32 +1832,78 @@ void
 vm_page_rename(
        register vm_page_t              mem,
        register vm_object_t            new_object,
-       vm_object_offset_t              new_offset)
+       vm_object_offset_t              new_offset,
+       boolean_t                       encrypted_ok)
 {
+       boolean_t internal_to_external, external_to_internal;
+       vm_tag_t  tag;
+
        assert(mem->object != new_object);
+
+        assert(mem->object);
+
        /*
         * ENCRYPTED SWAP:
         * The encryption key is based on the page's memory object
         * (aka "pager") and paging offset.  Moving the page to
         * another VM object changes its "pager" and "paging_offset"
-        * so it has to be decrypted first.
+        * so it has to be decrypted first, or we would lose the key.
+        *
+        * One exception is VM object collapsing, where we transfer pages
+        * from one backing object to its parent object.  This operation also
+        * transfers the paging information, so the <pager,paging_offset> info
+        * should remain consistent.  The caller (vm_object_do_collapse())
+        * sets "encrypted_ok" in this case.
         */
-       if (mem->encrypted) {
+       if (!encrypted_ok && mem->encrypted) {
                panic("vm_page_rename: page %p is encrypted\n", mem);
        }
+
+        XPR(XPR_VM_PAGE,
+                "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
+                new_object, new_offset, 
+               mem, 0,0);
+
        /*
         *      Changes to mem->object require the page lock because
         *      the pageout daemon uses that lock to get the object.
         */
+       vm_page_lockspin_queues();
 
-        XPR(XPR_VM_PAGE,
-                "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
-                (integer_t)new_object, (integer_t)new_offset, 
-               (integer_t)mem, 0,0);
+       internal_to_external = FALSE;
+       external_to_internal = FALSE;
+
+       if (mem->local) {
+               /*
+                * it's much easier to get the vm_page_pageable_xxx accounting correct
+                * if we first move the page to the active queue... it's going to end
+                * up there anyway, and we don't do vm_page_rename's frequently enough
+                * for this to matter.
+                */
+               vm_page_queues_remove(mem);
+               vm_page_activate(mem);
+       }
+       if (mem->active || mem->inactive || mem->speculative) {
+               if (mem->object->internal && !new_object->internal) {
+                       internal_to_external = TRUE;
+               }
+               if (!mem->object->internal && new_object->internal) {
+                       external_to_internal = TRUE;
+               }
+       }
+
+       tag = mem->object->wire_tag;
+       vm_page_remove(mem, TRUE);
+       vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
+
+       if (internal_to_external) {
+               vm_page_pageable_internal_count--;
+               vm_page_pageable_external_count++;
+       } else if (external_to_internal) {
+               vm_page_pageable_external_count--;
+               vm_page_pageable_internal_count++;
+       }
 
-       vm_page_lock_queues();
-       vm_page_remove(mem);
-       vm_page_insert(mem, new_object, new_offset);
        vm_page_unlock_queues();
 }
 
@@ -1134,11 +1917,40 @@ vm_page_rename(
 void
 vm_page_init(
        vm_page_t       mem,
-       ppnum_t phys_page)
+       ppnum_t         phys_page,
+       boolean_t       lopage)
 {
        assert(phys_page);
+
+#if    DEBUG
+       if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
+               if (!(pmap_valid_page(phys_page))) {
+                       panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page);
+               }
+       }
+#endif
        *mem = vm_page_template;
        mem->phys_page = phys_page;
+#if 0
+       /*
+        * we're leaving this turned off for now... currently pages
+        * come off the free list and are either immediately dirtied/referenced
+        * due to zero-fill or COW faults, or are used to read or write files...
+        * in the file I/O case, the UPL mechanism takes care of clearing
+        * the state of the HW ref/mod bits in a somewhat fragile way.
+        * Since we may change the way this works in the future (to toughen it up),
+        * I'm leaving this as a reminder of where these bits could get cleared
+        */
+
+       /*
+        * make sure both the h/w referenced and modified bits are
+        * clear at this point... we are especially dependent on 
+        * not finding a 'stale' h/w modified bit in a number of spots
+        * once this page goes back into use
+        */
+       pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+#endif
+       mem->lopage = lopage;
 }
 
 /*
@@ -1148,52 +1960,64 @@ vm_page_init(
  *     Returns VM_PAGE_NULL if there are no free pages.
  */
 int    c_vm_page_grab_fictitious = 0;
+int    c_vm_page_grab_fictitious_failed = 0;
 int    c_vm_page_release_fictitious = 0;
 int    c_vm_page_more_fictitious = 0;
 
 vm_page_t
-vm_page_grab_fictitious(void)
+vm_page_grab_fictitious_common(
+       ppnum_t phys_addr)
 {
-       register vm_page_t m;
+       vm_page_t       m;
+
+       if ((m = (vm_page_t)zget(vm_page_zone))) {
 
-       m = (vm_page_t)zget(vm_page_zone);
-       if (m) {
-               vm_page_init(m, vm_page_fictitious_addr);
+               vm_page_init(m, phys_addr, FALSE);
                m->fictitious = TRUE;
-       }
 
-       c_vm_page_grab_fictitious++;
+               c_vm_page_grab_fictitious++;
+       } else
+               c_vm_page_grab_fictitious_failed++;
+
        return m;
 }
 
+vm_page_t
+vm_page_grab_fictitious(void)
+{
+       return vm_page_grab_fictitious_common(vm_page_fictitious_addr);
+}
+
+vm_page_t
+vm_page_grab_guard(void)
+{
+       return vm_page_grab_fictitious_common(vm_page_guard_addr);
+}
+
+
 /*
  *     vm_page_release_fictitious:
  *
- *     Release a fictitious page to the free list.
+ *     Release a fictitious page to the zone pool
  */
-
 void
 vm_page_release_fictitious(
-       register vm_page_t m)
+       vm_page_t m)
 {
        assert(!m->free);
-       assert(m->busy);
        assert(m->fictitious);
-       assert(m->phys_page == vm_page_fictitious_addr);
+       assert(m->phys_page == vm_page_fictitious_addr ||
+              m->phys_page == vm_page_guard_addr);
 
        c_vm_page_release_fictitious++;
-#if DEBUG
-       if (m->free)
-               panic("vm_page_release_fictitious");
-#endif
-       m->free = TRUE;
+
        zfree(vm_page_zone, m);
 }
 
 /*
  *     vm_page_more_fictitious:
  *
- *     Add more fictitious pages to the free list.
+ *     Add more fictitious pages to the zone.
  *     Allowed to block. This routine is way intimate
  *     with the zones code, for several reasons:
  *     1. we need to carve some page structures out of physical
@@ -1207,23 +2031,13 @@ vm_page_release_fictitious(
  *        permanent allocation of a resource.
  *     3. To smooth allocation humps, we allocate single pages
  *        with kernel_memory_allocate(), and cram them into the
- *        zone. This also allows us to initialize the vm_page_t's
- *        on the way into the zone, so that zget() always returns
- *        an initialized structure. The zone free element pointer
- *        and the free page pointer are both the first item in the
- *        vm_page_t.
- *     4. By having the pages in the zone pre-initialized, we need
- *        not keep 2 levels of lists. The garbage collector simply
- *        scans our list, and reduces physical memory usage as it
- *        sees fit.
+ *        zone.
  */
 
 void vm_page_more_fictitious(void)
 {
-       register vm_page_t m;
-       vm_offset_t addr;
-       kern_return_t retval;
-       int i;
+       vm_offset_t     addr;
+       kern_return_t   retval;
 
        c_vm_page_more_fictitious++;
 
@@ -1236,7 +2050,7 @@ void vm_page_more_fictitious(void)
         * If winner is not vm-privileged, then the page allocation will fail,
         * and it will temporarily block here in the vm_page_wait().
         */
-       mutex_lock(&vm_page_alloc_lock);
+       lck_mtx_lock(&vm_page_alloc_lock);
        /*
         * If another thread allocated space, just bail out now.
         */
@@ -1253,76 +2067,30 @@ void vm_page_more_fictitious(void)
                 * of fictitious pages required in this manner is 2. 5 is
                 * simply a somewhat larger number.
                 */
-               mutex_unlock(&vm_page_alloc_lock);
+               lck_mtx_unlock(&vm_page_alloc_lock);
                return;
        }
 
        retval = kernel_memory_allocate(zone_map,
                                        &addr, PAGE_SIZE, VM_PROT_ALL,
-                                       KMA_KOBJECT|KMA_NOPAGEWAIT);
+                                       KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE);
        if (retval != KERN_SUCCESS) { 
                /*
-                * No page was available. Tell the pageout daemon, drop the
+                * No page was available. Drop the
                 * lock to give another thread a chance at it, and
                 * wait for the pageout daemon to make progress.
                 */
-               mutex_unlock(&vm_page_alloc_lock);
+               lck_mtx_unlock(&vm_page_alloc_lock);
                vm_page_wait(THREAD_UNINT);
                return;
        }
-       /*
-        * Initialize as many vm_page_t's as will fit on this page. This
-        * depends on the zone code disturbing ONLY the first item of
-        * each zone element.
-        */
-       m = (vm_page_t)addr;
-       for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
-               vm_page_init(m, vm_page_fictitious_addr);
-               m->fictitious = TRUE;
-               m++;
-       }
-       zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
-       mutex_unlock(&vm_page_alloc_lock);
-}
-
-/*
- *     vm_page_convert:
- *
- *     Attempt to convert a fictitious page into a real page.
- */
-
-boolean_t
-vm_page_convert(
-       register vm_page_t m)
-{
-       register vm_page_t real_m;
-
-       assert(m->busy);
-       assert(m->fictitious);
-       assert(!m->dirty);
-
-       real_m = vm_page_grab();
-       if (real_m == VM_PAGE_NULL)
-               return FALSE;
-
-       m->phys_page = real_m->phys_page;
-       m->fictitious = FALSE;
-       m->no_isync = TRUE;
-
-       vm_page_lock_queues();
-       if (m->active)
-               vm_page_active_count++;
-       else if (m->inactive)
-               vm_page_inactive_count++;
-       vm_page_unlock_queues();
 
-       real_m->phys_page = vm_page_fictitious_addr;
-       real_m->fictitious = TRUE;
+       zcram(vm_page_zone, addr, PAGE_SIZE);
 
-       vm_page_release_fictitious(real_m);
-       return TRUE;
+       lck_mtx_unlock(&vm_page_alloc_lock);
 }
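
vm_page_more_fictitious() above serializes refills on vm_page_alloc_lock, re-checks the zone's free count after acquiring the lock, and bails out if another thread already added space or if the backing allocation fails. The pthread-based sketch below captures that check / allocate / re-check shape with hypothetical names; the real code crams the page into vm_page_zone with zcram() and waits for the pageout daemon when the allocation fails.

/*
 * pthread sketch of the serialized refill above: take the refill lock,
 * re-check whether another thread already added space, and only then do
 * the (possibly failing) backing allocation.  Hypothetical names; the
 * real code uses lck_mtx, kernel_memory_allocate() and zcram().
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_LOW_WATER   5
#define MAX_CHUNKS      16

static pthread_mutex_t refill_lock = PTHREAD_MUTEX_INITIALIZER;
static int   pool_count;                /* free elements currently in the pool */
static void *chunks[MAX_CHUNKS];        /* backing memory handed to the pool */
static int   nchunks;

static void
pool_refill(void)
{
        pthread_mutex_lock(&refill_lock);

        /* if another thread already allocated space, just bail out now */
        if (pool_count > POOL_LOW_WATER || nchunks == MAX_CHUNKS) {
                pthread_mutex_unlock(&refill_lock);
                return;
        }

        void *chunk = malloc(4096);     /* stand-in for kernel_memory_allocate() */
        if (chunk == NULL) {
                /* drop the lock so another thread can try; the real code
                 * then waits for the pageout daemon to make progress */
                pthread_mutex_unlock(&refill_lock);
                return;
        }
        chunks[nchunks++] = chunk;      /* stand-in for zcram()ing it into the zone */
        pool_count += 4096 / 64;        /* assume 64-byte elements for the sketch */

        pthread_mutex_unlock(&refill_lock);
}

int
main(void)
{
        pool_refill();
        printf("pool_count after refill: %d\n", pool_count);

        for (int i = 0; i < nchunks; i++)
                free(chunks[i]);
        return 0;
}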
 
+
 /*
  *     vm_pool_low():
  *
@@ -1334,80 +2102,273 @@ int
 vm_pool_low(void)
 {
        /* No locking, at worst we will fib. */
-       return( vm_page_free_count < vm_page_free_reserved );
+       return( vm_page_free_count <= vm_page_free_reserved );
+}
+
+
+
+/*
+ * this is an interface to support bring-up of drivers
+ * on platforms with physical memory > 4G...
+ */
+int            vm_himemory_mode = 2;
+
+
+/*
+ * this interface exists to support hardware controllers
+ * incapable of generating DMAs with more than 32 bits
+ * of address on platforms with physical memory > 4G...
+ */
+unsigned int   vm_lopages_allocated_q = 0;
+unsigned int   vm_lopages_allocated_cpm_success = 0;
+unsigned int   vm_lopages_allocated_cpm_failed = 0;
+queue_head_t   vm_lopage_queue_free;
+
+vm_page_t
+vm_page_grablo(void)
+{
+       vm_page_t       mem;
+
+       if (vm_lopage_needed == FALSE)
+               return (vm_page_grab());
+
+       lck_mtx_lock_spin(&vm_page_queue_free_lock);
+
+        if ( !queue_empty(&vm_lopage_queue_free)) {
+                queue_remove_first(&vm_lopage_queue_free,
+                                   mem,
+                                   vm_page_t,
+                                   pageq);
+               assert(vm_lopage_free_count);
+
+                vm_lopage_free_count--;
+               vm_lopages_allocated_q++;
+
+               if (vm_lopage_free_count < vm_lopage_lowater)
+                       vm_lopage_refill = TRUE;
+
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+       } else {
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+
+               if (cpm_allocate(PAGE_SIZE, &mem, atop(0xffffffff), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
+
+                       lck_mtx_lock_spin(&vm_page_queue_free_lock);
+                       vm_lopages_allocated_cpm_failed++;
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+                       return (VM_PAGE_NULL);
+               }
+               mem->busy = TRUE;
+
+               vm_page_lockspin_queues();
+               
+               mem->gobbled = FALSE;
+               vm_page_gobble_count--;
+               vm_page_wire_count--;
+
+               vm_lopages_allocated_cpm_success++;
+               vm_page_unlock_queues();
+       }
+       assert(mem->busy);
+       assert(!mem->free);
+       assert(!mem->pmapped);
+       assert(!mem->wpmapped);
+       assert(!pmap_is_noencrypt(mem->phys_page));
+
+       mem->pageq.next = NULL;
+       mem->pageq.prev = NULL;
+
+       return (mem);
 }
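
vm_page_grablo() above serves hardware that can only generate 32-bit DMA addresses: it prefers a dedicated low-memory free queue and falls back to a contiguous allocation constrained below 4GB (cpm_allocate with KMA_LOMEM). A minimal sketch of that two-step fallback follows, with hypothetical names and a fake constrained allocator.

/*
 * Sketch of the low-memory fallback above: prefer a dedicated queue of
 * pages known to sit below 4GB, otherwise ask a constrained allocator for
 * one.  Hypothetical names and a fake allocator; the real code uses
 * cpm_allocate() with KMA_LOMEM.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DMA32_LIMIT 0xFFFFFFFFull       /* highest address a 32-bit DMA engine can reach */

struct page {
        uint64_t     phys;
        struct page *next;
};

static struct page *lopage_free_list;   /* pre-reserved pages below 4GB */

/* stand-in for a physically constrained allocation below 'limit' */
static struct page *
alloc_page_below(uint64_t limit)
{
        struct page *p = calloc(1, sizeof(*p));

        if (p != NULL)
                p->phys = limit & ~0xFFFull;    /* pretend the allocator honored the bound */
        return p;
}

static struct page *
page_grab_low(void)
{
        struct page *mem = lopage_free_list;

        if (mem != NULL) {                      /* fast path: dedicated low-memory queue */
                lopage_free_list = mem->next;
                mem->next = NULL;
                return mem;
        }
        return alloc_page_below(DMA32_LIMIT);   /* fallback: constrained allocation */
}

int
main(void)
{
        struct page *p = page_grab_low();

        printf("low page at phys 0x%llx\n",
               p ? (unsigned long long)p->phys : 0ULL);
        free(p);
        return 0;
}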
 
+
 /*
  *     vm_page_grab:
  *
- *     Remove a page from the free list.
- *     Returns VM_PAGE_NULL if the free list is too small.
+ *     first try to grab a page from the per-cpu free list...
+ *     this must be done while pre-emption is disabled... if
+ *     a page is available, we're done... 
+ *     if no page is available, grab the vm_page_queue_free_lock
+ *     and see if the current number of free pages would allow us
+ *     to grab at least 1... if not, return VM_PAGE_NULL as before... 
+ *     if there are pages available, disable preemption and
+ *     recheck the state of the per-cpu free list... we could
+ *     have been preempted and moved to a different cpu, or
+ *     some other thread could have re-filled it... if still
+ *     empty, figure out how many pages we can steal from the
+ *     global free queue and move to the per-cpu queue...
+ *     return 1 of these pages when done... only wakeup the
+ *     pageout_scan thread if we moved pages from the global
+ *     list... no need for the wakeup if we've satisfied the
+ *     request from the per-cpu queue.
  */
 
-unsigned long  vm_page_grab_count = 0; /* measure demand */
 
 vm_page_t
-vm_page_grab(void)
+vm_page_grab( void )
 {
-       register vm_page_t      mem;
+       vm_page_t       mem;
+
+
+       disable_preemption();
+
+       if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) {
+return_page_from_cpu_list:
+               PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
+               PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
+
+               enable_preemption();
+               mem->pageq.next = NULL;
+
+               assert(mem->listq.next == NULL && mem->listq.prev == NULL);
+               assert(mem->tabled == FALSE);
+               assert(mem->object == VM_OBJECT_NULL);
+               assert(!mem->laundry);
+               assert(!mem->free);
+               assert(pmap_verify_free(mem->phys_page));
+               assert(mem->busy);
+               assert(!mem->encrypted);
+               assert(!mem->pmapped);
+               assert(!mem->wpmapped);
+               assert(!mem->active);
+               assert(!mem->inactive);
+               assert(!mem->throttled);
+               assert(!mem->speculative);
+               assert(!pmap_is_noencrypt(mem->phys_page));
+
+               return mem;
+       }
+       enable_preemption();
 
-       mutex_lock(&vm_page_queue_free_lock);
-       vm_page_grab_count++;
 
        /*
         *      Optionally produce warnings if the wire or gobble
         *      counts exceed some threshold.
         */
-       if (vm_page_wire_count_warning > 0
-           && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+       if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
                printf("mk: vm_page_grab(): high wired page count of %d\n",
                        vm_page_wire_count);
-               assert(vm_page_wire_count < vm_page_wire_count_warning);
        }
-       if (vm_page_gobble_count_warning > 0
-           && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+       if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
                printf("mk: vm_page_grab(): high gobbled page count of %d\n",
                        vm_page_gobble_count);
-               assert(vm_page_gobble_count < vm_page_gobble_count_warning);
        }
+#endif
+       lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
        /*
         *      Only let privileged threads (involved in pageout)
         *      dip into the reserved pool.
         */
-
        if ((vm_page_free_count < vm_page_free_reserved) &&
            !(current_thread()->options & TH_OPT_VMPRIV)) {
-               mutex_unlock(&vm_page_queue_free_lock);
+               lck_mtx_unlock(&vm_page_queue_free_lock);
                mem = VM_PAGE_NULL;
-               goto wakeup_pageout;
        }
+       else {
+              vm_page_t        head;
+              vm_page_t        tail;
+              unsigned int     pages_to_steal;
+              unsigned int     color;
 
-       while (vm_page_queue_free == VM_PAGE_NULL) {
-               mutex_unlock(&vm_page_queue_free_lock);
-               VM_PAGE_WAIT();
-               mutex_lock(&vm_page_queue_free_lock);
-       }
+              while ( vm_page_free_count == 0 ) {
 
-       if (--vm_page_free_count < vm_page_free_count_minimum)
-               vm_page_free_count_minimum = vm_page_free_count;
-       mem = vm_page_queue_free;
-       vm_page_queue_free = (vm_page_t) mem->pageq.next;
-       mem->pageq.next = NULL;
-       mem->pageq.prev = NULL;
-       assert(mem->listq.next == NULL && mem->listq.prev == NULL);
-       assert(mem->tabled == FALSE);
-       assert(mem->object == VM_OBJECT_NULL);
-       assert(!mem->laundry);
-       mem->free = FALSE;
-       mem->no_isync = TRUE;
-       mutex_unlock(&vm_page_queue_free_lock);
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+                       /*
+                        * must be a privileged thread to be
+                        * in this state since a non-privileged 
+                        * thread would have bailed if we were
+                        * under the vm_page_free_reserved mark
+                        */
+                       VM_PAGE_WAIT();
+                       lck_mtx_lock_spin(&vm_page_queue_free_lock);
+               }
 
-       assert(pmap_verify_free(mem->phys_page));
+               disable_preemption();
 
-       /*
-        *      Decide if we should poke the pageout daemon.
-        *      We do this if the free count is less than the low
+               if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) {
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+                       /*
+                        * we got preempted and moved to another processor
+                        * or we got preempted and someone else ran and filled the cache
+                        */
+                       goto return_page_from_cpu_list;
+               }
+               if (vm_page_free_count <= vm_page_free_reserved)
+                       pages_to_steal = 1;
+               else {
+                       if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+                               pages_to_steal = vm_free_magazine_refill_limit;
+                       else
+                               pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
+               }
+               color = PROCESSOR_DATA(current_processor(), start_color);
+               head = tail = NULL;
+
+               vm_page_free_count -= pages_to_steal;
+
+               while (pages_to_steal--) {
+
+                       while (queue_empty(&vm_page_queue_free[color]))
+                               color = (color + 1) & vm_color_mask;
+               
+                       queue_remove_first(&vm_page_queue_free[color],
+                                          mem,
+                                          vm_page_t,
+                                          pageq);
+                       mem->pageq.next = NULL;
+                       mem->pageq.prev = NULL;
+
+                       assert(!mem->active);
+                       assert(!mem->inactive);
+                       assert(!mem->throttled);
+                       assert(!mem->speculative);                      
+
+                       color = (color + 1) & vm_color_mask;
+
+                       if (head == NULL)
+                               head = mem;
+                       else
+                               tail->pageq.next = (queue_t)mem;
+                       tail = mem;
+
+                       assert(mem->listq.next == NULL && mem->listq.prev == NULL);
+                       assert(mem->tabled == FALSE);
+                       assert(mem->object == VM_OBJECT_NULL);
+                       assert(!mem->laundry);
+                       assert(mem->free);
+                       mem->free = FALSE;
+
+                       assert(pmap_verify_free(mem->phys_page));
+                       assert(mem->busy);
+                       assert(!mem->free);
+                       assert(!mem->encrypted);
+                       assert(!mem->pmapped);
+                       assert(!mem->wpmapped);
+                       assert(!pmap_is_noencrypt(mem->phys_page));
+               }
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+
+               PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
+               PROCESSOR_DATA(current_processor(), start_color) = color;
+
+               /*
+                * satisfy this request
+                */
+               PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
+               mem = head;
+               mem->pageq.next = NULL;
+
+               enable_preemption();
+       }
+       /*
+        *      Decide if we should poke the pageout daemon.
+        *      We do this if the free count is less than the low
         *      water mark, or if the free count is less than the high
         *      water mark (but above the low water mark) and the inactive
         *      count is less than its target.
@@ -1415,13 +2376,13 @@ vm_page_grab(void)
         *      We don't have the counts locked ... if they change a little,
         *      it doesn't really matter.
         */
-
-wakeup_pageout:
        if ((vm_page_free_count < vm_page_free_min) ||
-           ((vm_page_free_count < vm_page_free_target) &&
-            (vm_page_inactive_count < vm_page_inactive_target)))
-               thread_wakeup((event_t) &vm_page_free_wanted);
+            ((vm_page_free_count < vm_page_free_target) &&
+             ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+                thread_wakeup((event_t) &vm_page_free_wanted);
 
+       VM_CHECK_MEMORYSTATUS;
+       
 //     dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);      /* (TEST/DEBUG) */
 
        return mem;
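
The block comment and code above introduce a per-cpu free list for vm_page_grab(): with preemption disabled, a page is popped from the local list, and only when that list is empty does the thread take vm_page_queue_free_lock and steal a batch of pages from the global colored queues. The user-space analogue below shows the same magazine shape, with a thread-local list standing in for the per-processor data and a mutex standing in for the free-queue lock; all names are hypothetical.

/*
 * User-space analogue of the per-cpu magazine in vm_page_grab(): satisfy
 * the request from a thread-local free list when possible, and refill that
 * list in one batch from the global list under a single lock acquisition
 * when it runs dry.  Hypothetical names; thread-local storage stands in
 * for per-processor data accessed with preemption disabled.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH 8

struct page { struct page *next; };

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *global_free;                /* global free list */
static __thread struct page *local_free;        /* per-thread "magazine" */

static struct page *
page_grab(void)
{
        struct page *mem = local_free;

        if (mem != NULL) {                      /* fast path: no lock taken */
                local_free = mem->next;
                mem->next = NULL;
                return mem;
        }

        pthread_mutex_lock(&global_lock);
        struct page *head = NULL, *tail = NULL;
        for (int i = 0; i < BATCH && global_free != NULL; i++) {
                struct page *p = global_free;   /* steal a batch of pages */
                global_free = p->next;
                p->next = NULL;
                if (head == NULL)
                        head = p;
                else
                        tail->next = p;
                tail = p;
        }
        pthread_mutex_unlock(&global_lock);

        if (head == NULL)
                return NULL;                    /* the global list is empty too */

        local_free = head->next;                /* park the remainder locally */
        head->next = NULL;
        return head;                            /* satisfy this request */
}

int
main(void)
{
        static struct page pool[16];

        for (int i = 0; i < 16; i++) {          /* seed the global free list */
                pool[i].next = global_free;
                global_free = &pool[i];
        }

        struct page *m1 = page_grab();          /* takes the lock, steals a batch */
        struct page *m2 = page_grab();          /* served from the local magazine */
        printf("grabbed %p then %p without retaking the lock\n",
               (void *)m1, (void *)m2);
        return 0;
}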
@@ -1437,62 +2398,125 @@ void
 vm_page_release(
        register vm_page_t      mem)
 {
+       unsigned int    color;
+       int     need_wakeup = 0;
+       int     need_priv_wakeup = 0;
 
-#if 0
-       unsigned int pindex;
-       phys_entry *physent;
 
-       physent = mapping_phys_lookup(mem->phys_page, &pindex);         /* (BRINGUP) */
-       if(physent->ppLink & ppN) {                                                                                     /* (BRINGUP) */
-               panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
-       }
-       physent->ppLink = physent->ppLink | ppN;                                                        /* (BRINGUP) */
-#endif
        assert(!mem->private && !mem->fictitious);
-
+       if (vm_page_free_verify) {
+               assert(pmap_verify_free(mem->phys_page));
+       }
 //     dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);      /* (TEST/DEBUG) */
 
-       mutex_lock(&vm_page_queue_free_lock);
+       pmap_clear_noencrypt(mem->phys_page);
+
+       lck_mtx_lock_spin(&vm_page_queue_free_lock);
 #if DEBUG
        if (mem->free)
                panic("vm_page_release");
 #endif
-       mem->free = TRUE;
+
+       assert(mem->busy);
        assert(!mem->laundry);
        assert(mem->object == VM_OBJECT_NULL);
        assert(mem->pageq.next == NULL &&
               mem->pageq.prev == NULL);
-       mem->pageq.next = (queue_entry_t) vm_page_queue_free;
-       vm_page_queue_free = mem;
-       vm_page_free_count++;
+       assert(mem->listq.next == NULL &&
+              mem->listq.prev == NULL);
+       
+       if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
+           vm_lopage_free_count < vm_lopage_free_limit &&
+           mem->phys_page < max_valid_low_ppnum) {
+               /*
+                * this exists to support hardware controllers
+                * incapable of generating DMAs with more than 32 bits
+                * of address on platforms with physical memory > 4G...
+                */
+               queue_enter_first(&vm_lopage_queue_free,
+                                 mem,
+                                 vm_page_t,
+                                 pageq);
+               vm_lopage_free_count++;
+
+               if (vm_lopage_free_count >= vm_lopage_free_limit)
+                       vm_lopage_refill = FALSE;
+
+               mem->lopage = TRUE;
+       } else {          
+               mem->lopage = FALSE;
+               mem->free = TRUE;
+
+               color = mem->phys_page & vm_color_mask;
+               queue_enter_first(&vm_page_queue_free[color],
+                                 mem,
+                                 vm_page_t,
+                                 pageq);
+               vm_page_free_count++;
+               /*
+                *      Check if we should wake up someone waiting for page.
+                *      But don't bother waking them unless they can allocate.
+                *
+                *      We wakeup only one thread, to prevent starvation.
+                *      Because the scheduling system handles wait queues FIFO,
+                *      if we wakeup all waiting threads, one greedy thread
+                *      can starve multiple niceguy threads.  When the threads
+                *      all wakeup, the greedy thread runs first, grabs the page,
+                *      and waits for another page.  It will be the first to run
+                *      when the next page is freed.
+                *
+                *      However, there is a slight danger here.
+                *      The thread we wake might not use the free page.
+                *      Then the other threads could wait indefinitely
+                *      while the page goes unused.  To forestall this,
+                *      the pageout daemon will keep making free pages
+                *      as long as vm_page_free_wanted is non-zero.
+                */
 
-       /*
-        *      Check if we should wake up someone waiting for page.
-        *      But don't bother waking them unless they can allocate.
-        *
-        *      We wakeup only one thread, to prevent starvation.
-        *      Because the scheduling system handles wait queues FIFO,
-        *      if we wakeup all waiting threads, one greedy thread
-        *      can starve multiple niceguy threads.  When the threads
-        *      all wakeup, the greedy threads runs first, grabs the page,
-        *      and waits for another page.  It will be the first to run
-        *      when the next page is freed.
-        *
-        *      However, there is a slight danger here.
-        *      The thread we wake might not use the free page.
-        *      Then the other threads could wait indefinitely
-        *      while the page goes unused.  To forestall this,
-        *      the pageout daemon will keep making free pages
-        *      as long as vm_page_free_wanted is non-zero.
-        */
+               assert(vm_page_free_count > 0);
+               if (vm_page_free_wanted_privileged > 0) {
+                       vm_page_free_wanted_privileged--;
+                       need_priv_wakeup = 1;
+               } else if (vm_page_free_wanted > 0 &&
+                          vm_page_free_count > vm_page_free_reserved) {
+                       vm_page_free_wanted--;
+                       need_wakeup = 1;
+               }
+       }
+       lck_mtx_unlock(&vm_page_queue_free_lock);
 
-       if ((vm_page_free_wanted > 0) &&
-           (vm_page_free_count >= vm_page_free_reserved)) {
-               vm_page_free_wanted--;
+       if (need_priv_wakeup)
+               thread_wakeup_one((event_t) &vm_page_free_wanted_privileged);
+       else if (need_wakeup)
                thread_wakeup_one((event_t) &vm_page_free_count);
-       }
 
-       mutex_unlock(&vm_page_queue_free_lock);
+       VM_CHECK_MEMORYSTATUS;
+}
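
vm_page_release() above wakes at most one waiter per freed page (so a greedy thread cannot starve the rest) and now prefers VM-privileged waiters over ordinary ones, making the decision under the free-list lock and issuing the wakeup after dropping it. A condensed pthread sketch of that policy follows, with hypothetical names in place of thread_wakeup_one() and the xnu wait events.

/*
 * pthread sketch of the "wake exactly one waiter, privileged first" policy
 * above.  Hypothetical names; the real code uses thread_wakeup_one() on
 * separate wait events for privileged and ordinary waiters.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t free_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  priv_waiters = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  norm_waiters = PTHREAD_COND_INITIALIZER;

static int free_count;
static int free_reserved = 4;   /* pages held back for privileged threads */
static int wanted_priv;         /* privileged threads blocked for a page */
static int wanted;              /* ordinary threads blocked for a page */

static void
page_release(void)
{
        int need_wakeup = 0, need_priv_wakeup = 0;

        pthread_mutex_lock(&free_lock);
        free_count++;

        if (wanted_priv > 0) {
                wanted_priv--;
                need_priv_wakeup = 1;           /* pageout-style waiters come first */
        } else if (wanted > 0 && free_count > free_reserved) {
                wanted--;                       /* ordinary waiters only above the reserve */
                need_wakeup = 1;
        }
        pthread_mutex_unlock(&free_lock);

        /* wake at most one thread, and do it after dropping the lock */
        if (need_priv_wakeup)
                pthread_cond_signal(&priv_waiters);
        else if (need_wakeup)
                pthread_cond_signal(&norm_waiters);
}

int
main(void)
{
        page_release();
        printf("free_count is now %d\n", free_count);
        return 0;
}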
+
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released 
+ * for the first time. Hence, no locking or unnecessary checks are made.
+ * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
+ */
+void
+vm_page_release_startup(
+       register vm_page_t      mem)
+{
+       queue_t queue_free;
+
+       if (vm_lopage_free_count < vm_lopage_free_limit &&
+           mem->phys_page < max_valid_low_ppnum) {
+               mem->lopage = TRUE;
+               vm_lopage_free_count++;
+               queue_free = &vm_lopage_queue_free;
+       } else {          
+               mem->lopage = FALSE;
+               mem->free = TRUE;
+               vm_page_free_count++;
+               queue_free = &vm_page_queue_free[mem->phys_page & vm_color_mask];
+       }
+       queue_enter_first(queue_free, mem, vm_page_t, pageq);
 }
 
 /*
@@ -1519,24 +2543,41 @@ vm_page_wait(
         */
        kern_return_t   wait_result;
        int             need_wakeup = 0;
+       int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
+
+       lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
-       mutex_lock(&vm_page_queue_free_lock);
+       if (is_privileged && vm_page_free_count) {
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+               return TRUE;
+       }
        if (vm_page_free_count < vm_page_free_target) {
-               if (vm_page_free_wanted++ == 0)
-                       need_wakeup = 1;
-               wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
-               mutex_unlock(&vm_page_queue_free_lock);
+
+               if (is_privileged) {
+                       if (vm_page_free_wanted_privileged++ == 0)
+                               need_wakeup = 1;
+                       wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, interruptible);
+               } else {
+                       if (vm_page_free_wanted++ == 0)
+                               need_wakeup = 1;
+                       wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
+               }
+               lck_mtx_unlock(&vm_page_queue_free_lock);
                counter(c_vm_page_wait_block++);
 
                if (need_wakeup)
                        thread_wakeup((event_t)&vm_page_free_wanted);
 
-               if (wait_result == THREAD_WAITING)
+               if (wait_result == THREAD_WAITING) {
+                       VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
+                                      vm_page_free_wanted_privileged, vm_page_free_wanted, 0, 0);
                        wait_result = thread_block(THREAD_CONTINUE_NULL);
+                       VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
+               }
 
                return(wait_result == THREAD_AWAKENED);
        } else {
-               mutex_unlock(&vm_page_queue_free_lock);
+               lck_mtx_unlock(&vm_page_queue_free_lock);
                return TRUE;
        }
 }
@@ -1557,9 +2598,7 @@ vm_page_alloc(
 {
        register vm_page_t      mem;
 
-#if DEBUG
-       _mutex_assert(&object->Lock, MA_OWNED);
-#endif
+       vm_object_lock_assert_exclusive(object);
        mem = vm_page_grab();
        if (mem == VM_PAGE_NULL)
                return VM_PAGE_NULL;
@@ -1569,49 +2608,122 @@ vm_page_alloc(
        return(mem);
 }
 
+/*
+ *     vm_page_alloc_guard:
+ *     
+ *     Allocate a fictitious page which will be used
+ *     as a guard page.  The page will be inserted into
+ *     the object and returned to the caller.
+ */
+
+vm_page_t
+vm_page_alloc_guard(
+       vm_object_t             object,
+       vm_object_offset_t      offset)
+{
+       register vm_page_t      mem;
+
+       vm_object_lock_assert_exclusive(object);
+       mem = vm_page_grab_guard();
+       if (mem == VM_PAGE_NULL)
+               return VM_PAGE_NULL;
+
+       vm_page_insert(mem, object, offset);
+
+       return(mem);
+}
+
+
 counter(unsigned int c_laundry_pages_freed = 0;)
 
-int vm_pagein_cluster_unused = 0;
-boolean_t      vm_page_free_verify = TRUE;
 /*
- *     vm_page_free:
+ *     vm_page_free_prepare:
  *
- *     Returns the given page to the free list,
- *     disassociating it with any VM object.
+ *     Removes page from any queue it may be on
+ *     and disassociates it from its VM object.
  *
  *     Object and page queues must be locked prior to entry.
  */
-void
-vm_page_free(
-       register vm_page_t      mem)
+static void
+vm_page_free_prepare(
+       vm_page_t       mem)
 {
-       vm_object_t     object = mem->object;
+       vm_page_free_prepare_queues(mem);
+       vm_page_free_prepare_object(mem, TRUE);
+}
 
+
+void
+vm_page_free_prepare_queues(
+       vm_page_t       mem)
+{
+       VM_PAGE_CHECK(mem);
        assert(!mem->free);
        assert(!mem->cleaning);
-       assert(!mem->pageout);
-       if (vm_page_free_verify && !mem->fictitious && !mem->private) {
-               assert(pmap_verify_free(mem->phys_page));
-       }
-
-#if DEBUG
-       if (mem->object)
-               _mutex_assert(&mem->object->Lock, MA_OWNED);
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
 
+#if MACH_ASSERT || DEBUG
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
        if (mem->free)
-              panic("vm_page_free: freeing page on free list\n");
-#endif
-       if (mem->tabled)
-               vm_page_remove(mem);    /* clears tabled, object, offset */
-       VM_PAGE_QUEUES_REMOVE(mem);     /* clears active or inactive */
-
-       if (mem->clustered) {
-               mem->clustered = FALSE;
-               vm_pagein_cluster_unused++;
+               panic("vm_page_free: freeing page on free list\n");
+#endif /* MACH_ASSERT || DEBUG */
+       if (mem->object) {
+               vm_object_lock_assert_exclusive(mem->object);
+       }
+       if (mem->laundry) {
+               /*
+                * We may have to free a page while it's being laundered
+                * if we lost its pager (due to a forced unmount, for example).
+                * We need to call vm_pageout_steal_laundry() before removing
+                * the page from its VM object, so that we can remove it
+                * from its pageout queue and adjust the laundry accounting
+                */
+               vm_pageout_steal_laundry(mem, TRUE);
+               counter(++c_laundry_pages_freed);
        }
+       
+       vm_page_queues_remove(mem);     /* clears local/active/inactive/throttled/speculative */
+
+       if (VM_PAGE_WIRED(mem)) {
+               if (mem->object) {
+                       assert(mem->object->wired_page_count > 0);
+                       mem->object->wired_page_count--;
+                       if (!mem->object->wired_page_count) {
+                           VM_OBJECT_UNWIRED(mem->object);
+                       }
 
-       if (mem->wire_count) {
+                       assert(mem->object->resident_page_count >=
+                              mem->object->wired_page_count);
+
+                       if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+                               OSAddAtomic(+1, &vm_page_purgeable_count);
+                               assert(vm_page_purgeable_wired_count > 0);
+                               OSAddAtomic(-1, &vm_page_purgeable_wired_count);
+                       }
+                       if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                            mem->object->purgable == VM_PURGABLE_EMPTY) &&
+                           mem->object->vo_purgeable_owner != TASK_NULL) {
+                               task_t owner;
+
+                               owner = mem->object->vo_purgeable_owner;
+                               /*
+                                * While wired, this page was accounted
+                                * as "non-volatile" but it should now
+                                * be accounted as "volatile".
+                                */
+                               /* one less "non-volatile"... */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.purgeable_nonvolatile,
+                                            PAGE_SIZE);
+                               /* ... and "phys_footprint" */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.phys_footprint,
+                                            PAGE_SIZE);
+                               /* one more "volatile" */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.purgeable_volatile,
+                                             PAGE_SIZE);
+                       }
+               }
                if (!mem->private && !mem->fictitious)
                        vm_page_wire_count--;
                mem->wire_count = 0;
@@ -1621,138 +2733,227 @@ vm_page_free(
                        vm_page_wire_count--;
                vm_page_gobble_count--;
        }
-       mem->gobbled = FALSE;
-
-       if (mem->laundry) {
-               vm_pageout_throttle_up(mem);
-               counter(++c_laundry_pages_freed);
-       }
-
-       PAGE_WAKEUP(mem);       /* clears wanted */
+}
 
-       if (mem->absent)
-               vm_object_absent_release(object);
 
-       /* Some of these may be unnecessary */
-       mem->page_lock = 0;
-       mem->unlock_request = 0;
-       mem->busy = TRUE;
-       mem->absent = FALSE;
-       mem->error = FALSE;
-       mem->dirty = FALSE;
-       mem->precious = FALSE;
-       mem->reference = FALSE;
-       mem->encrypted = FALSE;
+void
+vm_page_free_prepare_object(
+       vm_page_t       mem,
+       boolean_t       remove_from_hash)
+{
+       if (mem->tabled)
+               vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
 
-       mem->page_error = KERN_SUCCESS;
+       PAGE_WAKEUP(mem);               /* clears wanted */
 
        if (mem->private) {
                mem->private = FALSE;
                mem->fictitious = TRUE;
                mem->phys_page = vm_page_fictitious_addr;
        }
+       if ( !mem->fictitious) {
+               vm_page_init(mem, mem->phys_page, mem->lopage);
+       }
+}
+
+
+/*
+ *     vm_page_free:
+ *
+ *     Returns the given page to the free list,
+ *     disassociating it with any VM object.
+ *
+ *     Object and page queues must be locked prior to entry.
+ */
+void
+vm_page_free(
+       vm_page_t       mem)
+{
+       vm_page_free_prepare(mem);
+
        if (mem->fictitious) {
                vm_page_release_fictitious(mem);
        } else {
-               /* depends on the queues lock */
-               if(mem->zero_fill) {
-                       vm_zf_count-=1;
-                       mem->zero_fill = FALSE;
-               }
-               vm_page_init(mem, mem->phys_page);
                vm_page_release(mem);
        }
 }
 
 
 void
-vm_page_free_list(
-       register vm_page_t      mem)
+vm_page_free_unlocked(
+       vm_page_t       mem,
+       boolean_t       remove_from_hash)
 {
-        register vm_page_t     nxt;
-       register vm_page_t      first = NULL;
-       register vm_page_t      last = VM_PAGE_NULL;
-       register int            pg_count = 0;
+       vm_page_lockspin_queues();
+       vm_page_free_prepare_queues(mem);
+       vm_page_unlock_queues();
 
-#if DEBUG
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
-#endif
-       while (mem) {
-#if DEBUG
-               if (mem->tabled || mem->object)
-                       panic("vm_page_free_list: freeing tabled page\n");
-               if (mem->inactive || mem->active || mem->free)
-                       panic("vm_page_free_list: freeing page on list\n");
-#endif
-               assert(mem->pageq.prev == NULL);
-               nxt = (vm_page_t)(mem->pageq.next);
+       vm_page_free_prepare_object(mem, remove_from_hash);
+
+       if (mem->fictitious) {
+               vm_page_release_fictitious(mem);
+       } else {
+               vm_page_release(mem);
+       }
+}
 
-               if (mem->clustered)
-                       vm_pagein_cluster_unused++;
 
-               if (mem->laundry) {
-                       vm_pageout_throttle_up(mem);
-                       counter(++c_laundry_pages_freed);
-               }
-               mem->busy = TRUE;
+/*
+ * Free a list of pages.  The list can be up to several hundred pages,
+ * as blocked up by vm_pageout_scan().
+ * The big win is not having to take the free list lock once
+ * per page.
+ */
+void
+vm_page_free_list(
+       vm_page_t       freeq,
+       boolean_t       prepare_object)
+{
+        vm_page_t      mem;
+        vm_page_t      nxt;
+       vm_page_t       local_freeq;
+       int             pg_count;
 
-               PAGE_WAKEUP(mem);       /* clears wanted */
+       while (freeq) {
 
-               if (mem->private)
-                       mem->fictitious = TRUE;
+               pg_count = 0;
+               local_freeq = VM_PAGE_NULL;
+               mem = freeq;
 
-               if (!mem->fictitious) {
-                       /* depends on the queues lock */
-                       if (mem->zero_fill)
-                               vm_zf_count -= 1;
-                       assert(!mem->laundry);
-                       vm_page_init(mem, mem->phys_page);
+               /*
+                * break up the processing into smaller chunks so
+                * that we can 'pipeline' the pages onto the
+                * free list w/o introducing too much
+                * contention on the global free queue lock
+                */
+               while (mem && pg_count < 64) {
 
-                       mem->free = TRUE;
+                       assert(!mem->inactive);
+                       assert(!mem->active);
+                       assert(!mem->throttled);
+                       assert(!mem->free);
+                       assert(!mem->speculative);
+                       assert(!VM_PAGE_WIRED(mem));
+                       assert(mem->pageq.prev == NULL);
 
-                       if (first == NULL)
-                               last = mem;
-                       mem->pageq.next = (queue_t) first;
-                       first = mem;
+                       nxt = (vm_page_t)(mem->pageq.next);
+               
+                       if (vm_page_free_verify && !mem->fictitious && !mem->private) {
+                               assert(pmap_verify_free(mem->phys_page));
+                       }
+                       if (prepare_object == TRUE)
+                               vm_page_free_prepare_object(mem, TRUE);
 
-                       pg_count++;
-               } else {
-                       mem->phys_page = vm_page_fictitious_addr;
-                       vm_page_release_fictitious(mem);
+                       if (!mem->fictitious) {
+                               assert(mem->busy);
+
+                               if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
+                                   vm_lopage_free_count < vm_lopage_free_limit &&
+                                   mem->phys_page < max_valid_low_ppnum) {
+                                       mem->pageq.next = NULL;
+                                       vm_page_release(mem);
+                               } else {
+                                       /*
+                                        * IMPORTANT: we can't set the page "free" here
+                                        * because that would make the page eligible for
+                                        * a physically-contiguous allocation (see
+                                        * vm_page_find_contiguous()) right away (we don't
+                                        * hold the vm_page_queue_free lock).  That would
+                                        * cause trouble because the page is not actually
+                                        * in the free queue yet...
+                                        */
+                                       mem->pageq.next = (queue_entry_t)local_freeq;
+                                       local_freeq = mem;
+                                       pg_count++;
+
+                                       pmap_clear_noencrypt(mem->phys_page);
+                               }
+                       } else {
+                               assert(mem->phys_page == vm_page_fictitious_addr ||
+                                      mem->phys_page == vm_page_guard_addr);
+                               vm_page_release_fictitious(mem);
+                       }
+                       mem = nxt;
                }
-               mem = nxt;
-       }
-       if (first) {
-             
-               mutex_lock(&vm_page_queue_free_lock);
+               freeq = mem;
+
+               if ( (mem = local_freeq) ) {
+                       unsigned int    avail_free_count;
+                       unsigned int    need_wakeup = 0;
+                       unsigned int    need_priv_wakeup = 0;
+         
+                       lck_mtx_lock_spin(&vm_page_queue_free_lock);
+
+                       while (mem) {
+                               int     color;
+
+                               nxt = (vm_page_t)(mem->pageq.next);
+
+                               assert(!mem->free);
+                               assert(mem->busy);
+                               mem->free = TRUE;
+
+                               color = mem->phys_page & vm_color_mask;
+                               queue_enter_first(&vm_page_queue_free[color],
+                                                 mem,
+                                                 vm_page_t,
+                                                 pageq);
+                               mem = nxt;
+                       }
+                       vm_page_free_count += pg_count;
+                       avail_free_count = vm_page_free_count;
 
-               last->pageq.next = (queue_entry_t) vm_page_queue_free;
-               vm_page_queue_free = first;
+                       if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
 
-               vm_page_free_count += pg_count;
+                               if (avail_free_count < vm_page_free_wanted_privileged) {
+                                       need_priv_wakeup = avail_free_count;
+                                       vm_page_free_wanted_privileged -= avail_free_count;
+                                       avail_free_count = 0;
+                               } else {
+                                       need_priv_wakeup = vm_page_free_wanted_privileged;
+                                       vm_page_free_wanted_privileged = 0;
+                                       avail_free_count -= vm_page_free_wanted_privileged;
+                               }
+                       }
+                       if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
+                               unsigned int  available_pages;
 
-               if ((vm_page_free_wanted > 0) &&
-                   (vm_page_free_count >= vm_page_free_reserved)) {
-                       unsigned int  available_pages;
+                               available_pages = avail_free_count - vm_page_free_reserved;
 
-                       if (vm_page_free_count >= vm_page_free_reserved) {
-                               available_pages = (vm_page_free_count
-                                                  - vm_page_free_reserved);
-                       } else {
-                               available_pages = 0;
+                               if (available_pages >= vm_page_free_wanted) {
+                                       need_wakeup = vm_page_free_wanted;
+                                       vm_page_free_wanted = 0;
+                               } else {
+                                       need_wakeup = available_pages;
+                                       vm_page_free_wanted -= available_pages;
+                               }
                        }
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
 
-                       if (available_pages >= vm_page_free_wanted) {
-                               vm_page_free_wanted = 0;
+                       if (need_priv_wakeup != 0) {
+                               /*
+                                * There shouldn't be that many VM-privileged threads,
+                                * so let's wake them all up, even if we don't quite
+                                * have enough pages to satisfy them all.
+                                */
+                               thread_wakeup((event_t)&vm_page_free_wanted_privileged);
+                       }
+                       if (need_wakeup != 0 && vm_page_free_wanted == 0) {
+                               /*
+                                * We don't expect to have any more waiters
+                                * after this, so let's wake them all up at
+                                * once.
+                                */
                                thread_wakeup((event_t) &vm_page_free_count);
-                       } else {
-                               while (available_pages--) {
-                                       vm_page_free_wanted--;
-                                       thread_wakeup_one((event_t) &vm_page_free_count);
-                               }
+                       } else for (; need_wakeup != 0; need_wakeup--) {
+                               /*
+                                * Wake up one waiter per page we just released.
+                                */
+                               thread_wakeup_one((event_t) &vm_page_free_count);
                        }
+
+                       VM_CHECK_MEMORYSTATUS;
                }
-               mutex_unlock(&vm_page_queue_free_lock);
        }
 }
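
As an illustration, here is a minimal userland sketch of the batching pattern described above, assuming a pthread mutex in place of vm_page_queue_free_lock and a plain singly-linked list in place of the colored free queues; every name below is hypothetical and none of this is kernel code.

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *freelist;
static unsigned long free_count;

void
free_list_batched(struct node *freeq)
{
	while (freeq) {
		struct node *local = NULL;
		int batch = 0;

		/* peel off up to 64 nodes without holding the global lock */
		while (freeq && batch < 64) {
			struct node *nxt = freeq->next;
			freeq->next = local;
			local = freeq;
			freeq = nxt;
			batch++;
		}
		/* one lock round-trip per batch instead of one per node */
		pthread_mutex_lock(&freelist_lock);
		while (local) {
			struct node *nxt = local->next;
			local->next = freelist;
			freelist = local;
			local = nxt;
		}
		free_count += batch;
		pthread_mutex_unlock(&freelist_lock);
	}
}

int
main(void)
{
	struct node nodes[130];
	struct node *list = NULL;
	int i;

	for (i = 0; i < 130; i++) {		/* build a 130-element list */
		nodes[i].next = list;
		list = &nodes[i];
	}
	free_list_batched(list);		/* 130 nodes -> 3 lock acquisitions */
	return free_count == 130 ? 0 : 1;
}
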
 
@@ -1766,30 +2967,109 @@ vm_page_free_list(
  *
  *     The page's object and the page queues must be locked.
  */
+
+
 void
 vm_page_wire(
-       register vm_page_t      mem)
+       register vm_page_t mem,
+       vm_tag_t           tag,
+       boolean_t          check_memorystatus)
 {
 
 //     dbgLog(current_thread(), mem->offset, mem->object, 1);  /* (TEST/DEBUG) */
 
        VM_PAGE_CHECK(mem);
+       if (mem->object) {
+               vm_object_lock_assert_exclusive(mem->object);
+       } else {
+               /*
+                * In theory, the page should be in an object before it
+                * gets wired, since we need to hold the object lock
+                * to update some fields in the page structure.
+                * However, some code (i386 pmap, for example) might want
+                * to wire a page before it gets inserted into an object.
+                * That's somewhat OK, as long as nobody else can get to
+                * that page and update it at the same time.
+                */
+       }
 #if DEBUG
-       if (mem->object)
-               _mutex_assert(&mem->object->Lock, MA_OWNED);
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
-       if (mem->wire_count == 0) {
-               VM_PAGE_QUEUES_REMOVE(mem);
+       if ( !VM_PAGE_WIRED(mem)) {
+
+               if (mem->pageout_queue) {
+                       mem->pageout = FALSE;
+                       vm_pageout_throttle_up(mem);
+               }
+               vm_page_queues_remove(mem);
+
+               if (mem->object) {
+
+                       if (!mem->private && !mem->fictitious) {
+                               if (!mem->object->wired_page_count) {
+                                       assert(VM_KERN_MEMORY_NONE != tag);
+                                       mem->object->wire_tag = tag;
+                                       VM_OBJECT_WIRED(mem->object);
+                               }
+                       }
+                       mem->object->wired_page_count++;
+
+                       assert(mem->object->resident_page_count >=
+                              mem->object->wired_page_count);
+                       if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+                               assert(vm_page_purgeable_count > 0);
+                               OSAddAtomic(-1, &vm_page_purgeable_count);
+                               OSAddAtomic(1, &vm_page_purgeable_wired_count);
+                       }
+                       if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                            mem->object->purgable == VM_PURGABLE_EMPTY) &&
+                           mem->object->vo_purgeable_owner != TASK_NULL) {
+                               task_t owner;
+
+                               owner = mem->object->vo_purgeable_owner;
+                               /* less volatile bytes */
+                               ledger_debit(owner->ledger,
+                                            task_ledgers.purgeable_volatile,
+                                            PAGE_SIZE);
+                               /* more not-quite-volatile bytes */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.purgeable_nonvolatile,
+                                             PAGE_SIZE);
+                               /* more footprint */
+                               ledger_credit(owner->ledger,
+                                             task_ledgers.phys_footprint,
+                                             PAGE_SIZE);
+                       }
+                       if (mem->object->all_reusable) {
+                               /*
+                                * Wired pages are not counted as "re-usable"
+                                * in "all_reusable" VM objects, so nothing
+                                * to do here.
+                                */
+                       } else if (mem->reusable) {
+                               /*
+                                * This page is not "re-usable" when it's
+                                * wired, so adjust its state and the
+                                * accounting.
+                                */
+                               vm_object_reuse_pages(mem->object,
+                                                     mem->offset,
+                                                     mem->offset+PAGE_SIZE_64,
+                                                     FALSE);
+                       }
+               }
+               assert(!mem->reusable);
+
                if (!mem->private && !mem->fictitious && !mem->gobbled)
                        vm_page_wire_count++;
                if (mem->gobbled)
                        vm_page_gobble_count--;
                mem->gobbled = FALSE;
-               if(mem->zero_fill) {
-                       /* depends on the queues lock */
-                       vm_zf_count-=1;
-                       mem->zero_fill = FALSE;
+
+               if (check_memorystatus == TRUE) {
+                       VM_CHECK_MEMORYSTATUS;
                }
                /* 
                 * ENCRYPTED SWAP:
@@ -1803,32 +3083,7 @@ vm_page_wire(
        }
        assert(!mem->gobbled);
        mem->wire_count++;
-}
-
-/*
- *      vm_page_gobble:
- *
- *      Mark this page as consumed by the vm/ipc/xmm subsystems.
- *
- *      Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
- */
-void
-vm_page_gobble(
-        register vm_page_t      mem)
-{
-        vm_page_lock_queues();
-        VM_PAGE_CHECK(mem);
-
-       assert(!mem->gobbled);
-       assert(mem->wire_count == 0);
-
-        if (!mem->gobbled && mem->wire_count == 0) {
-                if (!mem->private && !mem->fictitious)
-                        vm_page_wire_count++;
-        }
-       vm_page_gobble_count++;
-        mem->gobbled = TRUE;
-        vm_page_unlock_queues();
+       VM_PAGE_CHECK(mem);
 }
 
 /*
@@ -1841,29 +3096,70 @@ vm_page_gobble(
  */
 void
 vm_page_unwire(
-       register vm_page_t      mem)
+       vm_page_t       mem,
+       boolean_t       queueit)
 {
 
 //     dbgLog(current_thread(), mem->offset, mem->object, 0);  /* (TEST/DEBUG) */
 
        VM_PAGE_CHECK(mem);
-       assert(mem->wire_count > 0);
+       assert(VM_PAGE_WIRED(mem));
+       assert(!mem->gobbled);
+       assert(mem->object != VM_OBJECT_NULL);
 #if DEBUG
-       if (mem->object)
-               _mutex_assert(&mem->object->Lock, MA_OWNED);
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
+       vm_object_lock_assert_exclusive(mem->object);
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
        if (--mem->wire_count == 0) {
-               assert(!mem->private && !mem->fictitious);
-               vm_page_wire_count--;
-               assert(!mem->laundry);
+               if (!mem->private && !mem->fictitious) {
+                       vm_page_wire_count--;
+               }
+               assert(mem->object->wired_page_count > 0);
+               mem->object->wired_page_count--;
+               if (!mem->object->wired_page_count) {
+                   VM_OBJECT_UNWIRED(mem->object);
+               }
+               assert(mem->object->resident_page_count >=
+                      mem->object->wired_page_count);
+               if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+                       OSAddAtomic(+1, &vm_page_purgeable_count);
+                       assert(vm_page_purgeable_wired_count > 0);
+                       OSAddAtomic(-1, &vm_page_purgeable_wired_count);
+               }
+               if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+                    mem->object->purgable == VM_PURGABLE_EMPTY) &&
+                   mem->object->vo_purgeable_owner != TASK_NULL) {
+                       task_t owner;
+
+                       owner = mem->object->vo_purgeable_owner;
+                       /* more volatile bytes */
+                       ledger_credit(owner->ledger,
+                                     task_ledgers.purgeable_volatile,
+                                     PAGE_SIZE);
+                       /* less not-quite-volatile bytes */
+                       ledger_debit(owner->ledger,
+                                    task_ledgers.purgeable_nonvolatile,
+                                    PAGE_SIZE);
+                       /* less footprint */
+                       ledger_debit(owner->ledger,
+                                    task_ledgers.phys_footprint,
+                                    PAGE_SIZE);
+               }
                assert(mem->object != kernel_object);
                assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
-               queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
-               vm_page_active_count++;
-               mem->active = TRUE;
-               mem->reference = TRUE;
+
+               if (queueit == TRUE) {
+                       if (mem->object->purgable == VM_PURGABLE_EMPTY) {
+                               vm_page_deactivate(mem);
+                       } else {
+                               vm_page_activate(mem);
+                       }
+               }
+
+               VM_CHECK_MEMORYSTATUS;
+               
        }
+       VM_PAGE_CHECK(mem);
 }
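
For the purgeable accounting above, here is a small self-contained sketch (hypothetical struct and function names, not the kernel ledger API) of the symmetric debit/credit moves made when a volatile page is wired and later unwired; because unwiring reverses each move exactly, the three buckets return to their starting values.

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096

struct ledger {
	int64_t purgeable_volatile;
	int64_t purgeable_nonvolatile;
	int64_t phys_footprint;
};

static void
wire_volatile_page(struct ledger *l)
{
	l->purgeable_volatile    -= PAGE_SIZE;	/* less volatile bytes */
	l->purgeable_nonvolatile += PAGE_SIZE;	/* more not-quite-volatile bytes */
	l->phys_footprint        += PAGE_SIZE;	/* more footprint */
}

static void
unwire_volatile_page(struct ledger *l)
{
	l->purgeable_volatile    += PAGE_SIZE;	/* more volatile bytes */
	l->purgeable_nonvolatile -= PAGE_SIZE;	/* less not-quite-volatile bytes */
	l->phys_footprint        -= PAGE_SIZE;	/* less footprint */
}

int
main(void)
{
	struct ledger l = { 3 * PAGE_SIZE, 0, 0 };

	wire_volatile_page(&l);
	unwire_volatile_page(&l);
	assert(l.purgeable_volatile == 3 * PAGE_SIZE);
	assert(l.purgeable_nonvolatile == 0 && l.phys_footprint == 0);
	return 0;
}
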
 
 /*
@@ -1877,14 +3173,25 @@ vm_page_unwire(
  */
 void
 vm_page_deactivate(
-       register vm_page_t      m)
+       vm_page_t       m)
 {
+       vm_page_deactivate_internal(m, TRUE);
+}
+
+
+void
+vm_page_deactivate_internal(
+       vm_page_t       m,
+       boolean_t       clear_hw_reference)
+{
+
        VM_PAGE_CHECK(m);
        assert(m->object != kernel_object);
+       assert(m->phys_page != vm_page_guard_addr);
 
 //     dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6);        /* (TEST/DEBUG) */
 #if DEBUG
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
        /*
         *      This page is no longer very interesting.  If it was
@@ -1893,46 +3200,110 @@ vm_page_deactivate(
         *      inactive queue.  Note wired pages should not have
         *      their reference bit cleared.
         */
+       assert ( !(m->absent && !m->unusual));
+
        if (m->gobbled) {               /* can this happen? */
-               assert(m->wire_count == 0);
+               assert( !VM_PAGE_WIRED(m));
+
                if (!m->private && !m->fictitious)
                        vm_page_wire_count--;
                vm_page_gobble_count--;
                m->gobbled = FALSE;
        }
-       if (m->private || (m->wire_count != 0))
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
                return;
-       if (m->active || (m->inactive && m->reference)) {
-               if (!m->fictitious && !m->absent)
-                       pmap_clear_reference(m->phys_page);
-               m->reference = FALSE;
-               VM_PAGE_QUEUES_REMOVE(m);
-       }
-       if (m->wire_count == 0 && !m->inactive) {
-               m->page_ticket = vm_page_ticket;
-               vm_page_ticket_roll++;
-
-               if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
-                       vm_page_ticket_roll = 0;
-                       if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
-                               vm_page_ticket= 0;
-                       else
-                               vm_page_ticket++;
-               }
-               
-               assert(!m->laundry);
-               assert(m->pageq.next == NULL && m->pageq.prev == NULL);
-               if(m->zero_fill) {
-                       queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
+
+       if (!m->absent && clear_hw_reference == TRUE)
+               pmap_clear_reference(m->phys_page);
+
+       m->reference = FALSE;
+       m->no_cache = FALSE;
+
+       if (!m->inactive) {
+               vm_page_queues_remove(m);
+
+               if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
+                   m->dirty && m->object->internal &&
+                   (m->object->purgable == VM_PURGABLE_DENY ||
+                    m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+                    m->object->purgable == VM_PURGABLE_VOLATILE)) {
+                       vm_page_check_pageable_safe(m);
+                       queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+                       m->throttled = TRUE;
+                       vm_page_throttled_count++;
                } else {
-                       queue_enter(&vm_page_queue_inactive,
-                                                       m, vm_page_t, pageq);
+                       if (m->object->named && m->object->ref_count == 1) {
+                               vm_page_speculate(m, FALSE);
+#if DEVELOPMENT || DEBUG
+                               vm_page_speculative_recreated++;
+#endif
+                       } else {
+                               vm_page_enqueue_inactive(m, FALSE);
+                       }
                }
+       }
+}
+
+/*
+ * vm_page_enqueue_cleaned
+ *
+ * Put the page on the cleaned queue, mark it cleaned, etc.
+ * Being on the cleaned queue (and having m->clean_queue set)
+ * does ** NOT ** guarantee that the page is clean!
+ *
+ * Call with the queues lock held.
+ */
+
+void vm_page_enqueue_cleaned(vm_page_t m)
+{
+       assert(m->phys_page != vm_page_guard_addr);
+#if DEBUG
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       assert( !(m->absent && !m->unusual));
+
+       if (m->gobbled) {
+               assert( !VM_PAGE_WIRED(m));
+               if (!m->private && !m->fictitious)
+                       vm_page_wire_count--;
+               vm_page_gobble_count--;
+               m->gobbled = FALSE;
+       }
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->clean_queue || m->pageout_queue || m->private || m->fictitious)
+               return;
+
+       vm_page_queues_remove(m);
 
-               m->inactive = TRUE;
-               if (!m->fictitious)
-                       vm_page_inactive_count++;
+       vm_page_check_pageable_safe(m);
+       queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
+       m->clean_queue = TRUE;
+       vm_page_cleaned_count++;
+
+       m->inactive = TRUE;
+       vm_page_inactive_count++;
+       if (m->object->internal) {
+               vm_page_pageable_internal_count++;
+       } else {
+               vm_page_pageable_external_count++;
        }
+
+       vm_pageout_enqueued_cleaned++;
 }
 
 /*
@@ -1948,46 +3319,416 @@ vm_page_activate(
        register vm_page_t      m)
 {
        VM_PAGE_CHECK(m);
+#ifdef FIXME_4778297
        assert(m->object != kernel_object);
+#endif
+       assert(m->phys_page != vm_page_guard_addr);
 #if DEBUG
-       _mutex_assert(&vm_page_queue_lock, MA_OWNED);
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
+       assert( !(m->absent && !m->unusual));
+
        if (m->gobbled) {
-               assert(m->wire_count == 0);
+               assert( !VM_PAGE_WIRED(m));
                if (!m->private && !m->fictitious)
                        vm_page_wire_count--;
                vm_page_gobble_count--;
                m->gobbled = FALSE;
        }
-       if (m->private)
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
+               return;
+
+#if DEBUG
+       if (m->active)
+               panic("vm_page_activate: already active");
+#endif
+
+       if (m->speculative) {
+               DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
+               DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
+       }
+       
+       vm_page_queues_remove(m);
+
+       if ( !VM_PAGE_WIRED(m)) {
+               vm_page_check_pageable_safe(m);
+               if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && 
+                   m->dirty && m->object->internal && 
+                   (m->object->purgable == VM_PURGABLE_DENY ||
+                    m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+                    m->object->purgable == VM_PURGABLE_VOLATILE)) {
+                       queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+                       m->throttled = TRUE;
+                       vm_page_throttled_count++;
+               } else {
+                       queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+                       m->active = TRUE;
+                       vm_page_active_count++;
+                       if (m->object->internal) {
+                               vm_page_pageable_internal_count++;
+                       } else {
+                               vm_page_pageable_external_count++;
+                       }
+               }
+               m->reference = TRUE;
+               m->no_cache = FALSE;
+       }
+       VM_PAGE_CHECK(m);
+}
+
+
+/*
+ *      vm_page_speculate:
+ *
+ *      Put the specified page on the speculative list (if appropriate).
+ *
+ *      The page queues must be locked.
+ */
+void
+vm_page_speculate(
+       vm_page_t       m,
+       boolean_t       new)
+{
+        struct vm_speculative_age_q    *aq;
+
+       VM_PAGE_CHECK(m);
+       vm_page_check_pageable_safe(m);
+
+       assert(m->phys_page != vm_page_guard_addr);
+#if DEBUG
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       assert( !(m->absent && !m->unusual));
+
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
                return;
 
-       if (m->inactive) {
-               assert(!m->laundry);
-               if (m->zero_fill) {
-                       queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
+       vm_page_queues_remove(m);
+
+       if ( !VM_PAGE_WIRED(m)) {
+               mach_timespec_t         ts;
+               clock_sec_t sec;
+               clock_nsec_t nsec;
+
+               clock_get_system_nanotime(&sec, &nsec);
+               ts.tv_sec = (unsigned int) sec;
+               ts.tv_nsec = nsec;
+
+               if (vm_page_speculative_count == 0) {
+
+                       speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+                       speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+
+                       aq = &vm_page_queue_speculative[speculative_age_index];
+
+                       /*
+                        * set the timer to begin a new group
+                        */
+                       aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+                       aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
+
+                       ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
+               } else {
+                       aq = &vm_page_queue_speculative[speculative_age_index];
+
+                       if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
+
+                               speculative_age_index++;
+
+                               if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
+                                       speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+                               if (speculative_age_index == speculative_steal_index) {
+                                       speculative_steal_index = speculative_age_index + 1;
+
+                                       if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
+                                               speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+                               }
+                               aq = &vm_page_queue_speculative[speculative_age_index];
+
+                               if (!queue_empty(&aq->age_q))
+                                       vm_page_speculate_ageit(aq);
+
+                               aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+                               aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
+
+                               ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
+                       }
+               }
+               enqueue_tail(&aq->age_q, &m->pageq);
+               m->speculative = TRUE;
+               vm_page_speculative_count++;
+               if (m->object->internal) {
+                       vm_page_pageable_internal_count++;
                } else {
-                       queue_remove(&vm_page_queue_inactive, 
-                                               m, vm_page_t, pageq);
+                       vm_page_pageable_external_count++;
+               }
+
+               if (new == TRUE) {
+                       vm_object_lock_assert_exclusive(m->object);
+
+                       m->object->pages_created++;
+#if DEVELOPMENT || DEBUG
+                       vm_page_speculative_created++;
+#endif
                }
-               m->pageq.next = NULL;
-               m->pageq.prev = NULL;
-               if (!m->fictitious)
-                       vm_page_inactive_count--;
-               m->inactive = FALSE;
        }
-       if (m->wire_count == 0) {
+       VM_PAGE_CHECK(m);
+}
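
A hedged sketch of the millisecond-to-timespec conversion used above when a speculative age queue starts a new group: clock_gettime() and the helper below are userland stand-ins (the kernel uses clock_get_system_nanotime() and ADD_MACH_TIMESPEC), and the 500ms value is only an example.

#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000ULL

static struct timespec
expiry_from_ms(struct timespec now, unsigned int age_ms)
{
	struct timespec ts;

	ts.tv_sec  = now.tv_sec + age_ms / 1000;
	ts.tv_nsec = now.tv_nsec + (age_ms % 1000) * 1000 * NSEC_PER_USEC;
	if (ts.tv_nsec >= 1000000000L) {	/* carry the nanoseconds into seconds */
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	return ts;
}

int
main(void)
{
	struct timespec now, when;

	clock_gettime(CLOCK_MONOTONIC, &now);
	when = expiry_from_ms(now, 500);	/* e.g. a 500ms aging window */
	printf("group expires at %ld.%09ld\n", (long)when.tv_sec, when.tv_nsec);
	return 0;
}
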
+
+
+/*
+ * move pages from the specified aging bin to
+ * the speculative bin that pageout_scan claims from
+ *
+ *      The page queues must be locked.
+ */
+void
+vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
+{
+        struct vm_speculative_age_q    *sq;
+       vm_page_t       t;
+
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+       if (queue_empty(&sq->age_q)) {
+               sq->age_q.next = aq->age_q.next;
+               sq->age_q.prev = aq->age_q.prev;
+               
+               t = (vm_page_t)sq->age_q.next;
+               t->pageq.prev = &sq->age_q;
+
+               t = (vm_page_t)sq->age_q.prev;
+               t->pageq.next = &sq->age_q;
+       } else {
+               t = (vm_page_t)sq->age_q.prev;
+               t->pageq.next = aq->age_q.next;
+                                               
+               t = (vm_page_t)aq->age_q.next;
+               t->pageq.prev = sq->age_q.prev;
+
+               t = (vm_page_t)aq->age_q.prev;
+               t->pageq.next = &sq->age_q;
+
+               sq->age_q.prev = aq->age_q.prev;
+       }
+       queue_init(&aq->age_q);
+}
+
+
+void
+vm_page_lru(
+       vm_page_t       m)
+{
+       VM_PAGE_CHECK(m);
+       assert(m->object != kernel_object);
+       assert(m->phys_page != vm_page_guard_addr);
+
 #if DEBUG
-               if (m->active)
-                       panic("vm_page_activate: already active");
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
-               assert(!m->laundry);
-               assert(m->pageq.next == NULL && m->pageq.prev == NULL);
-               queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
-               m->active = TRUE;
-               m->reference = TRUE;
-               if (!m->fictitious)
-                       vm_page_active_count++;
+       /*
+        * if this page is currently on the pageout queue, we can't do the
+        * vm_page_queues_remove (which doesn't handle the pageout queue case)
+        * and we can't remove it manually since we would need the object lock
+        * (which is not required here) to decrement the activity_in_progress
+        * reference which is held on the object while the page is in the pageout queue...
+        * just let the normal laundry processing proceed
+        */
+       if (m->laundry || m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
+               return;
+
+       m->no_cache = FALSE;
+
+       vm_page_queues_remove(m);
+
+       vm_page_enqueue_inactive(m, FALSE);
+}
+
+
+void
+vm_page_reactivate_all_throttled(void)
+{
+       vm_page_t       first_throttled, last_throttled;
+       vm_page_t       first_active;
+       vm_page_t       m;
+       int             extra_active_count;
+       int             extra_internal_count, extra_external_count;
+
+       if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default))
+               return;
+
+       extra_active_count = 0;
+       extra_internal_count = 0;
+       extra_external_count = 0;
+       vm_page_lock_queues();
+       if (! queue_empty(&vm_page_queue_throttled)) {
+               /*
+                * Switch "throttled" pages to "active".
+                */
+               queue_iterate(&vm_page_queue_throttled, m, vm_page_t, pageq) {
+                       VM_PAGE_CHECK(m);
+                       assert(m->throttled);
+                       assert(!m->active);
+                       assert(!m->inactive);
+                       assert(!m->speculative);
+                       assert(!VM_PAGE_WIRED(m));
+
+                       extra_active_count++;
+                       if (m->object->internal) {
+                               extra_internal_count++;
+                       } else {
+                               extra_external_count++;
+                       }
+
+                       m->throttled = FALSE;
+                       m->active = TRUE;
+                       VM_PAGE_CHECK(m);
+               }
+
+               /*
+                * Transfer the entire throttled queue to the regular LRU page queues.
+                * We insert it at the head of the active queue, so that these pages
+                * get re-evaluated by the LRU algorithm first, since they've been
+                * completely out of it until now.
+                */
+               first_throttled = (vm_page_t) queue_first(&vm_page_queue_throttled);
+               last_throttled = (vm_page_t) queue_last(&vm_page_queue_throttled);
+               first_active = (vm_page_t) queue_first(&vm_page_queue_active);
+               if (queue_empty(&vm_page_queue_active)) {
+                       queue_last(&vm_page_queue_active) = (queue_entry_t) last_throttled;
+               } else {
+                       queue_prev(&first_active->pageq) = (queue_entry_t) last_throttled;
+               }
+               queue_first(&vm_page_queue_active) = (queue_entry_t) first_throttled;
+               queue_prev(&first_throttled->pageq) = (queue_entry_t) &vm_page_queue_active;
+               queue_next(&last_throttled->pageq) = (queue_entry_t) first_active;
+
+#if DEBUG
+               printf("reactivated %d throttled pages\n", vm_page_throttled_count);
+#endif
+               queue_init(&vm_page_queue_throttled);
+               /*
+                * Adjust the global page counts.
+                */
+               vm_page_active_count += extra_active_count;
+               vm_page_pageable_internal_count += extra_internal_count;
+               vm_page_pageable_external_count += extra_external_count;
+               vm_page_throttled_count = 0;
+       }
+       assert(vm_page_throttled_count == 0);
+       assert(queue_empty(&vm_page_queue_throttled));
+       vm_page_unlock_queues();
+}
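
The pointer surgery above is an O(1) splice of one doubly-linked queue onto the head of another. The following sketch, using a hypothetical circular sentinel queue rather than the kernel's queue_entry_t, shows the same four-pointer patch in isolation.

#include <assert.h>

struct qhead { struct qhead *next, *prev; };	/* circular list, head is a sentinel */

static void
q_init(struct qhead *h)
{
	h->next = h->prev = h;
}

/* splice all of 'src' in at the head of 'dst', leaving 'src' empty */
static void
q_splice_head(struct qhead *dst, struct qhead *src)
{
	if (src->next == src)			/* source empty: nothing to do */
		return;
	src->prev->next = dst->next;		/* last src element -> old dst first */
	dst->next->prev = src->prev;
	dst->next = src->next;			/* dst head -> first src element */
	src->next->prev = dst;
	q_init(src);
}

int
main(void)
{
	struct qhead active, throttled, a, b;

	q_init(&active);
	q_init(&throttled);
	/* put two elements on the throttled list */
	throttled.next = &a; a.prev = &throttled; a.next = &b;
	b.prev = &a; b.next = &throttled; throttled.prev = &b;

	q_splice_head(&active, &throttled);
	assert(active.next == &a && active.prev == &b);
	assert(throttled.next == &throttled);
	return 0;
}
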
+
+
+/*
+ * move pages from the indicated local queue to the global active queue
+ * it's OK to fail if we're below the hard limit and force == FALSE;
+ * the nolocks == TRUE case is to allow this function to be run on
+ * the hibernate path
+ */
+
+void
+vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
+{
+       struct vpl      *lq;
+       vm_page_t       first_local, last_local;
+       vm_page_t       first_active;
+       vm_page_t       m;
+       uint32_t        count = 0;
+
+       if (vm_page_local_q == NULL)
+               return;
+
+       lq = &vm_page_local_q[lid].vpl_un.vpl;
+
+       if (nolocks == FALSE) {
+               if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
+                       if ( !vm_page_trylockspin_queues())
+                               return;
+               } else
+                       vm_page_lockspin_queues();
+
+               VPL_LOCK(&lq->vpl_lock);
+       }
+       if (lq->vpl_count) {
+               /*
+                * Switch "local" pages to "active".
+                */
+               assert(!queue_empty(&lq->vpl_queue));
+
+               queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
+                       VM_PAGE_CHECK(m);
+                       vm_page_check_pageable_safe(m);
+                       assert(m->local);
+                       assert(!m->active);
+                       assert(!m->inactive);
+                       assert(!m->speculative);
+                       assert(!VM_PAGE_WIRED(m));
+                       assert(!m->throttled);
+                       assert(!m->fictitious);
+
+                       if (m->local_id != lid)
+                               panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
+                       
+                       m->local_id = 0;
+                       m->local = FALSE;
+                       m->active = TRUE;
+                       VM_PAGE_CHECK(m);
+
+                       count++;
+               }
+               if (count != lq->vpl_count)
+                       panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d\n", count, lq->vpl_count);
+
+               /*
+                * Transfer the entire local queue to the regular LRU page queues.
+                */
+               first_local = (vm_page_t) queue_first(&lq->vpl_queue);
+               last_local = (vm_page_t) queue_last(&lq->vpl_queue);
+               first_active = (vm_page_t) queue_first(&vm_page_queue_active);
+
+               if (queue_empty(&vm_page_queue_active)) {
+                       queue_last(&vm_page_queue_active) = (queue_entry_t) last_local;
+               } else {
+                       queue_prev(&first_active->pageq) = (queue_entry_t) last_local;
+               }
+               queue_first(&vm_page_queue_active) = (queue_entry_t) first_local;
+               queue_prev(&first_local->pageq) = (queue_entry_t) &vm_page_queue_active;
+               queue_next(&last_local->pageq) = (queue_entry_t) first_active;
+
+               queue_init(&lq->vpl_queue);
+               /*
+                * Adjust the global page counts.
+                */
+               vm_page_active_count += lq->vpl_count;
+               vm_page_pageable_internal_count += lq->vpl_internal_count;
+               vm_page_pageable_external_count += lq->vpl_external_count;
+               lq->vpl_count = 0;
+               lq->vpl_internal_count = 0;
+               lq->vpl_external_count = 0;
+       }
+       assert(queue_empty(&lq->vpl_queue));
+
+       if (nolocks == FALSE) {
+               VPL_UNLOCK(&lq->vpl_lock);
+               vm_page_unlock_queues();
        }
 }
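
A sketch of the locking policy vm_page_reactivate_local() applies, with pthreads standing in for the spin-lock primitives and all names hypothetical: below the hard limit and without force, a contended global lock just defers the drain to a later pass; at or above the limit (or when forced) the caller must block for the lock.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t global_queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns true if the caller now holds global_queue_lock */
static bool
acquire_for_drain(unsigned int local_count, unsigned int hard_limit, bool force)
{
	if (local_count < hard_limit && !force) {
		/* optional work: don't fight for the lock, try again later */
		return pthread_mutex_trylock(&global_queue_lock) == 0;
	}
	/* mandatory work: wait for the lock */
	pthread_mutex_lock(&global_queue_lock);
	return true;
}

int
main(void)
{
	if (acquire_for_drain(10, 64, false))
		pthread_mutex_unlock(&global_queue_lock);
	return 0;
}
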
 
@@ -1996,18 +3737,26 @@ vm_page_activate(
  *
  *     Zero-fill a part of the page.
  */
+#define PMAP_ZERO_PART_PAGE_IMPLEMENTED
 void
 vm_page_part_zero_fill(
        vm_page_t       m,
        vm_offset_t     m_pa,
        vm_size_t       len)
 {
-       vm_page_t       tmp;
 
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(m);
+#endif
+
 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
        pmap_zero_part_page(m->phys_page, m_pa, len);
 #else
+       vm_page_t       tmp;
        while (1) {
                        tmp = vm_page_grab();
                if (tmp == VM_PAGE_NULL) {
@@ -2025,9 +3774,7 @@ vm_page_part_zero_fill(
                                m_pa + len, PAGE_SIZE - (m_pa + len));
        }
        vm_page_copy(tmp,m);
-       vm_page_lock_queues();
-       vm_page_free(tmp); 
-       vm_page_unlock_queues();
+       VM_PAGE_FREE(tmp); 
 #endif
 
 }
@@ -2043,9 +3790,14 @@ vm_page_zero_fill(
 {
         XPR(XPR_VM_PAGE,
                 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
-                (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
-
+                m->object, m->offset, m, 0,0);
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(m);
+#endif
 
 //     dbgTrace(0xAEAEAEAE, m->phys_page, 0);          /* (BRINGUP) */
        pmap_zero_page(m->phys_page);
@@ -2065,9 +3817,14 @@ vm_page_part_copy(
        vm_offset_t     dst_pa,
        vm_size_t       len)
 {
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dst_m);
-
+#endif
        pmap_copy_part_page(src_m->phys_page, src_pa,
                        dst_m->phys_page, dst_pa, len);
 }
@@ -2082,6 +3839,9 @@ vm_page_part_copy(
  * make sure the page is decrypted first, if necessary.
  */
 
+int vm_page_copy_cs_validations = 0;
+int vm_page_copy_cs_tainted = 0;
+
 void
 vm_page_copy(
        vm_page_t       src_m,
@@ -2089,12 +3849,18 @@ vm_page_copy(
 {
         XPR(XPR_VM_PAGE,
         "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
-        (integer_t)src_m->object, src_m->offset, 
-       (integer_t)dest_m->object, dest_m->offset,
+        src_m->object, src_m->offset, 
+       dest_m->object, dest_m->offset,
        0);
-
+#if 0
+       /*
+        * we don't hold the page queue lock
+        * so this check isn't safe to make
+        */
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dest_m);
+#endif
+       vm_object_lock_assert_held(src_m->object);
 
        /*
         * ENCRYPTED SWAP:
@@ -2107,32 +3873,101 @@ vm_page_copy(
        }
        dest_m->encrypted = FALSE;
 
+       if (src_m->object != VM_OBJECT_NULL &&
+           src_m->object->code_signed) {
+               /*
+                * We're copying a page from a code-signed object.
+                * Whoever ends up mapping the copy page might care about
+                * the original page's integrity, so let's validate the
+                * source page now.
+                */
+               vm_page_copy_cs_validations++;
+               vm_page_validate_cs(src_m);
+       }
+
+       if (vm_page_is_slideable(src_m)) {
+               boolean_t was_busy = src_m->busy;
+               src_m->busy = TRUE;
+               (void) vm_page_slide(src_m, 0);
+               assert(src_m->busy);
+               if (!was_busy) {
+                       PAGE_WAKEUP_DONE(src_m);
+               }
+       }
+
+       /*
+        * Propagate the cs_tainted bit to the copy page. Do not propagate
+        * the cs_validated bit.
+        */
+       dest_m->cs_tainted = src_m->cs_tainted;
+       if (dest_m->cs_tainted) {
+               vm_page_copy_cs_tainted++;
+       }
+       dest_m->slid = src_m->slid;
+       dest_m->error = src_m->error; /* sliding src_m might have failed... */
        pmap_copy_page(src_m->phys_page, dest_m->phys_page);
 }
 
-/*
- *     Currently, this is a primitive allocator that grabs
- *     free pages from the system, sorts them by physical
- *     address, then searches for a region large enough to
- *     satisfy the user's request.
- *
- *     Additional levels of effort:
- *             + steal clean active/inactive pages
- *             + force pageouts of dirty pages
- *             + maintain a map of available physical
- *             memory
- */
+#if MACH_ASSERT
+static void
+_vm_page_print(
+       vm_page_t       p)
+{
+       printf("vm_page %p: \n", p);
+       printf("  pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
+       printf("  listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
+       printf("  next=%p\n", VM_PAGE_UNPACK_PTR(p->next_m));
+       printf("  object=%p offset=0x%llx\n", p->object, p->offset);
+       printf("  wire_count=%u\n", p->wire_count);
+
+       printf("  %slocal, %sinactive, %sactive, %spageout_queue, %sspeculative, %slaundry\n",
+              (p->local ? "" : "!"),
+              (p->inactive ? "" : "!"),
+              (p->active ? "" : "!"),
+              (p->pageout_queue ? "" : "!"),
+              (p->speculative ? "" : "!"),
+              (p->laundry ? "" : "!"));
+       printf("  %sfree, %sref, %sgobbled, %sprivate, %sthrottled\n",
+              (p->free ? "" : "!"),
+              (p->reference ? "" : "!"),
+              (p->gobbled ? "" : "!"),
+              (p->private ? "" : "!"),
+              (p->throttled ? "" : "!"));
+       printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
+               (p->busy ? "" : "!"),
+               (p->wanted ? "" : "!"),
+               (p->tabled ? "" : "!"),
+               (p->fictitious ? "" : "!"),
+               (p->pmapped ? "" : "!"),
+               (p->wpmapped ? "" : "!"));
+       printf("  %spageout, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
+              (p->pageout ? "" : "!"),
+              (p->absent ? "" : "!"),
+              (p->error ? "" : "!"),
+              (p->dirty ? "" : "!"),
+              (p->cleaning ? "" : "!"),
+              (p->precious ? "" : "!"),
+              (p->clustered ? "" : "!"));
+       printf("  %soverwriting, %srestart, %sunusual, %sencrypted, %sencrypted_cleaning\n",
+              (p->overwriting ? "" : "!"),
+              (p->restart ? "" : "!"),
+              (p->unusual ? "" : "!"),
+              (p->encrypted ? "" : "!"),
+              (p->encrypted_cleaning ? "" : "!"));
+       printf("  %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n",
+              (p->cs_validated ? "" : "!"),
+              (p->cs_tainted ? "" : "!"),
+              (p->cs_nx ? "" : "!"),
+              (p->no_cache ? "" : "!"));
+
+       printf("phys_page=0x%x\n", p->phys_page);
+}
 
-#if    MACH_ASSERT
 /*
  *     Check that the list of pages is ordered by
  *     ascending physical address and has no holes.
  */
-int    vm_page_verify_contiguous(
-       vm_page_t       pages,
-       unsigned int    npages);
-
-int
+static int
 vm_page_verify_contiguous(
        vm_page_t       pages,
        unsigned int    npages)
@@ -2145,324 +3980,2684 @@ vm_page_verify_contiguous(
        page_count = 1;
        for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
                if (m->phys_page != prev_addr + 1) {
-                       printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
-                              m, prev_addr, m->phys_page);
-                       printf("pages 0x%x page_count %d\n", pages, page_count);
+                       printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
+                              m, (long)prev_addr, m->phys_page);
+                       printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
                        panic("vm_page_verify_contiguous:  not contiguous!");
                }
                prev_addr = m->phys_page;
                ++page_count;
        }
        if (page_count != npages) {
-               printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
+               printf("pages %p actual count 0x%x but requested 0x%x\n",
                       pages, page_count, npages);
                panic("vm_page_verify_contiguous:  count error");
        }
        return 1;
 }
-#endif /* MACH_ASSERT */
 
 
-cpm_counter(unsigned int       vpfls_pages_handled = 0;)
-cpm_counter(unsigned int       vpfls_head_insertions = 0;)
-cpm_counter(unsigned int       vpfls_tail_insertions = 0;)
-cpm_counter(unsigned int       vpfls_general_insertions = 0;)
-cpm_counter(unsigned int       vpfc_failed = 0;)
-cpm_counter(unsigned int       vpfc_satisfied = 0;)
-
 /*
- *     Find a region large enough to contain at least npages
- *     of contiguous physical memory.
- *
- *     Requirements:
- *             - Called while holding vm_page_queue_free_lock.
- *             - Doesn't respect vm_page_free_reserved; caller
- *             must not ask for more pages than are legal to grab.
- *
- *     Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
- *
- * Algorithm:
- *     Loop over the free list, extracting one page at a time and
- *     inserting those into a sorted sub-list.  We stop as soon as
- *     there's a contiguous range within the sorted list that can
- *     satisfy the contiguous memory request.  This contiguous sub-
- *     list is chopped out of the sorted sub-list and the remainder
- *     of the sorted sub-list is put back onto the beginning of the
- *     free list.
+ *     Check the free lists for proper length etc.
  */
-static vm_page_t
-vm_page_find_contiguous(
-       unsigned int    contig_pages)
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
+static unsigned int
+vm_page_verify_free_list(
+       queue_head_t    *vm_page_queue,
+       unsigned int    color,
+       vm_page_t       look_for_page,
+       boolean_t       expect_page)
 {
-       vm_page_t       sort_list;
-       vm_page_t       *contfirstprev, contlast;
-       vm_page_t       m, m1;
-       ppnum_t         prevcontaddr;
-       ppnum_t         nextcontaddr;
-       unsigned int    npages;
+       unsigned int    npages;
+       vm_page_t       m;
+       vm_page_t       prev_m;
+       boolean_t       found_page;
 
-       m = NULL;
-#if DEBUG
-       _mutex_assert(&vm_page_queue_free_lock, MA_OWNED);
-#endif
-#if    MACH_ASSERT
-       /*
-        *      Verify pages in the free list..
-        */
+       if (! vm_page_verify_this_free_list_enabled)
+               return 0;
+
+       found_page = FALSE;
        npages = 0;
-       for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
+       prev_m = (vm_page_t) vm_page_queue;
+       queue_iterate(vm_page_queue,
+                     m,
+                     vm_page_t,
+                     pageq) {
+
+               if (m == look_for_page) {
+                       found_page = TRUE;
+               }
+               if ((vm_page_t) m->pageq.prev != prev_m)
+                       panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
+                             color, npages, m, m->pageq.prev, prev_m);
+               if ( ! m->busy )
+                       panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
+                             color, npages, m);
+               if (color != (unsigned int) -1) {
+                       if ((m->phys_page & vm_color_mask) != color)
+                               panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
+                                     color, npages, m, m->phys_page & vm_color_mask, color);
+                       if ( ! m->free )
+                               panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n",
+                                     color, npages, m);
+               }
                ++npages;
-       if (npages != vm_page_free_count)
-               panic("vm_sort_free_list:  prelim:  npages %u free_count %d",
-                     npages, vm_page_free_count);
+               prev_m = m;
+       }
+       if (look_for_page != VM_PAGE_NULL) {
+               unsigned int other_color;
+
+               if (expect_page && !found_page) {
+                       printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
+                              color, npages, look_for_page, look_for_page->phys_page);
+                       _vm_page_print(look_for_page);
+                       for (other_color = 0;
+                            other_color < vm_colors;
+                            other_color++) {
+                               if (other_color == color)
+                                       continue;
+                               vm_page_verify_free_list(&vm_page_queue_free[other_color],
+                                                        other_color, look_for_page, FALSE);
+                       }
+                       if (color == (unsigned int) -1) {
+                               vm_page_verify_free_list(&vm_lopage_queue_free,
+                                                        (unsigned int) -1, look_for_page, FALSE);
+                       }
+                       panic("vm_page_verify_free_list(color=%u)\n", color);
+               }
+               if (!expect_page && found_page) {
+                       printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
+                              color, npages, look_for_page, look_for_page->phys_page);
+               }
+       }
+       return npages;
+}
+
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
+static void
+vm_page_verify_free_lists( void )
+{
+       unsigned int    color, npages, nlopages;
+       boolean_t       toggle = TRUE;
+
+       if (! vm_page_verify_all_free_lists_enabled)
+               return;
+
+       npages = 0;
+
+       lck_mtx_lock(&vm_page_queue_free_lock);
+       
+       if (vm_page_verify_this_free_list_enabled == TRUE) {
+               /*
+                * This variable has been set globally for extra checking of
+                * each free list Q. Since we didn't set it, we don't own it
+                * and we shouldn't toggle it.
+                */
+               toggle = FALSE;
+       }
+
+       if (toggle == TRUE) {
+               vm_page_verify_this_free_list_enabled = TRUE;
+       }
+
+       for( color = 0; color < vm_colors; color++ ) {
+               npages += vm_page_verify_free_list(&vm_page_queue_free[color],
+                                                  color, VM_PAGE_NULL, FALSE);
+       }
+       nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
+                                           (unsigned int) -1,
+                                           VM_PAGE_NULL, FALSE);
+       if (npages != vm_page_free_count || nlopages != vm_lopage_free_count)
+               panic("vm_page_verify_free_lists:  "
+                     "npages %u free_count %d nlopages %u lo_free_count %u",
+                     npages, vm_page_free_count, nlopages, vm_lopage_free_count);
+
+       if (toggle == TRUE) {
+               vm_page_verify_this_free_list_enabled = FALSE;
+       }
+
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+}
+
+void
+vm_page_queues_assert(
+       vm_page_t       mem,
+       int             val)
+{
+#if DEBUG
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       if (mem->free + mem->active + mem->inactive + mem->speculative +
+           mem->throttled + mem->pageout_queue > (val)) {
+               _vm_page_print(mem);
+               panic("vm_page_queues_assert(%p, %d)\n", mem, val);
+       }
+       if (VM_PAGE_WIRED(mem)) {
+               assert(!mem->active);
+               assert(!mem->inactive);
+               assert(!mem->speculative);
+               assert(!mem->throttled);
+               assert(!mem->pageout_queue);
+       }
+}
 #endif /* MACH_ASSERT */
 
-       if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
-               return VM_PAGE_NULL;
 
-#define PPNUM_PREV(x)  (((x) > 0) ? ((x) - 1) : 0)
-#define PPNUM_NEXT(x)  (((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)
-#define SET_NEXT_PAGE(m,n)     ((m)->pageq.next = (struct queue_entry *) (n))
 
-       npages = 1;
-       contfirstprev = &sort_list;
-       contlast = sort_list = vm_page_queue_free;
-       vm_page_queue_free = NEXT_PAGE(sort_list);
-       SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
-       prevcontaddr = PPNUM_PREV(sort_list->phys_page);
-       nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
 
-       while (npages < contig_pages && 
-              (m = vm_page_queue_free) != VM_PAGE_NULL)
-       {
-               cpm_counter(++vpfls_pages_handled);
 
-               /* prepend to existing run? */
-               if (m->phys_page == prevcontaddr)
-               {
-                       vm_page_queue_free = NEXT_PAGE(m);
-                       cpm_counter(++vpfls_head_insertions);
-                       prevcontaddr = PPNUM_PREV(prevcontaddr);
-                       SET_NEXT_PAGE(m, *contfirstprev);
-                       *contfirstprev = m;
-                       npages++;
-                       continue; /* no tail expansion check needed */
-               } 
-
-               /* append to tail of existing run? */
-               else if (m->phys_page == nextcontaddr)
-               {
-                       vm_page_queue_free = NEXT_PAGE(m);
-                       cpm_counter(++vpfls_tail_insertions);
-                       nextcontaddr = PPNUM_NEXT(nextcontaddr);
-                       SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
-                       SET_NEXT_PAGE(contlast, m);
-                       contlast = m;
-                       npages++;
-               }
+extern boolean_t (* volatile consider_buffer_cache_collect)(int);
+
+/*
+ *     CONTIGUOUS PAGE ALLOCATION
+ *
+ *     Find a region large enough to contain at least n pages
+ *     of contiguous physical memory.
+ *
+ *     This is done by traversing the vm_page_t array in a linear fashion
+ *     we assume that the vm_page_t array has the available physical pages in an
+ *     ordered, ascending list... this is currently true of all our implementations
+ *     and must remain so... there can be 'holes' in the array...  we also can
+ *     no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
+ *     which used to happen via 'vm_page_convert'... that function was no longer
+ *     being called and was removed...
+ *     
+ *     The basic flow consists of stabilizing some of the interesting state of 
+ *     a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
+ *     sweep at the beginning of the array looking for pages that meet our criteria
+ *     for a 'stealable' page... currently we are pretty conservative... if the page
+ *     meets this criteria and is physically contiguous to the previous page in the 'run',
+ *     we keep developing it.  If we hit a page that doesn't fit, we reset our state
+ *     and start to develop a new run... if at this point we've already considered
+ *     at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
+ *     and mutex_pause (which will yield the processor), to keep the latency low w/r 
+ *     to other threads trying to acquire free pages (or move pages from q to q),
+ *     and then continue from the spot we left off... we only make 1 pass through the
+ *     array.  Once we have a 'run' that is long enough, we'll go into the loop
+ *     which steals the pages from the queues they're currently on... pages on the free
+ *     queue can be stolen directly... pages that are on any of the other queues
+ *     must be removed from the object they are tabled on... this requires taking the
+ *     object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
+ *     or if the state of the page behind the vm_object lock is no longer viable, we'll
+ *     dump the pages we've currently stolen back to the free list, and pick up our
+ *     scan from the point where we aborted the 'current' run.
+ *
+ *
+ *     Requirements:
+ *             - neither vm_page_queue nor vm_free_list lock can be held on entry
+ *
+ *     Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
+ *
+ * Algorithm:
+ */
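+
+/*
+ * Worked example (illustrative only, not part of the original change): with
+ * contig_pages == 4, no alignment mask, and candidate pages at physical page
+ * numbers 100, 101, 102, 105, 106, 107 and 108, the run 100-102 is abandoned
+ * when 105 breaks contiguity, a new run is started at 105, and the allocation
+ * succeeds once 108 brings the run length up to 4.
+ */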
+
+#define        MAX_CONSIDERED_BEFORE_YIELD     1000
+
+
+#define RESET_STATE_OF_RUN()   \
+       MACRO_BEGIN             \
+       prevcontaddr = -2;      \
+       start_pnum = -1;        \
+       free_considered = 0;    \
+       substitute_needed = 0;  \
+       npages = 0;             \
+       MACRO_END                       
+
+/*
+ * Can we steal in-use (i.e. not free) pages when searching for
+ * physically-contiguous pages ?
+ */
+#define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
+
+static unsigned int vm_page_find_contiguous_last_idx = 0,  vm_page_lomem_find_contiguous_last_idx = 0;
+#if DEBUG
+int vm_page_find_contig_debug = 0;
+#endif
+
+static vm_page_t
+vm_page_find_contiguous(
+       unsigned int    contig_pages,
+       ppnum_t         max_pnum,
+       ppnum_t     pnum_mask,
+       boolean_t       wire,
+       int             flags)
+{
+       vm_page_t       m = NULL;
+       ppnum_t         prevcontaddr;
+       ppnum_t         start_pnum;
+       unsigned int    npages, considered, scanned;
+       unsigned int    page_idx, start_idx, last_idx, orig_last_idx;
+       unsigned int    idx_last_contig_page_found = 0;
+       int             free_considered, free_available;
+       int             substitute_needed;
+       boolean_t       wrapped, zone_gc_called = FALSE;
+#if DEBUG
+       clock_sec_t     tv_start_sec, tv_end_sec;
+       clock_usec_t    tv_start_usec, tv_end_usec;
+#endif
+
+       int             yielded = 0;
+       int             dumped_run = 0;
+       int             stolen_pages = 0;
+       int             compressed_pages = 0;
+
+
+       if (contig_pages == 0)
+               return VM_PAGE_NULL;
+
+full_scan_again:
+
+#if MACH_ASSERT
+       vm_page_verify_free_lists();
+#endif
+#if DEBUG
+       clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
+#endif
+       PAGE_REPLACEMENT_ALLOWED(TRUE);
+
+       vm_page_lock_queues();
+
+
+       lck_mtx_lock(&vm_page_queue_free_lock);
+
+       RESET_STATE_OF_RUN();
+
+       scanned = 0;
+       considered = 0;
+       free_available = vm_page_free_count - vm_page_free_reserved;
+
+       wrapped = FALSE;
+       
+       if(flags & KMA_LOMEM) 
+               idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
+       else
+               idx_last_contig_page_found =  vm_page_find_contiguous_last_idx;
+
+       orig_last_idx = idx_last_contig_page_found;
+       last_idx = orig_last_idx;
+
+       for (page_idx = last_idx, start_idx = last_idx;
+            npages < contig_pages && page_idx < vm_pages_count;
+            page_idx++) {
+retry:
+               if (wrapped &&
+                   npages == 0 &&
+                   page_idx >= orig_last_idx) {
+                       /*
+                        * We're back where we started and we haven't
+                        * found any suitable contiguous range.  Let's
+                        * give up.
+                        */
+                       break;
+               }
+               scanned++;
+               m = &vm_pages[page_idx];
+
+               assert(!m->fictitious);
+               assert(!m->private);
+
+               if (max_pnum && m->phys_page > max_pnum) {
+                       /* no more low pages... */
+                       break;
+               }
+               if (!npages && ((m->phys_page & pnum_mask) != 0)) {
+                       /*
+                        * not aligned
+                        */
+                       RESET_STATE_OF_RUN();
+
+               } else if (VM_PAGE_WIRED(m) || m->gobbled ||
+                          m->encrypted_cleaning ||
+                          m->pageout_queue || m->laundry || m->wanted ||
+                          m->cleaning || m->overwriting || m->pageout) {
+                       /*
+                        * page is in a transient state
+                        * or a state we don't want to deal
+                        * with, so don't consider it which
+                        * means starting a new run
+                        */
+                       RESET_STATE_OF_RUN();
+
+               } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled && !m->compressor) {
+                       /*
+                        * page needs to be on one of our queues
+                        * or it needs to belong to the compressor pool
+                        * in order for it to be stable behind the
+                        * locks we hold at this point...
+                        * if not, don't consider it which
+                        * means starting a new run
+                        */
+                       RESET_STATE_OF_RUN();
+
+               } else if (!m->free && (!m->tabled || m->busy)) {
+                       /*
+                        * pages on the free list are always 'busy'
+                        * so we couldn't test for 'busy' in the check
+                        * for the transient states... pages that are
+                        * 'free' are never 'tabled', so we also couldn't
+                        * test for 'tabled'.  So we check here to make
+                        * sure that a non-free page is not busy and is
+                        * tabled on an object... 
+                        * if not, don't consider it which
+                        * means starting a new run
+                        */
+                       RESET_STATE_OF_RUN();
+
+               } else {
+                       if (m->phys_page != prevcontaddr + 1) {
+                               if ((m->phys_page & pnum_mask) != 0) {
+                                       RESET_STATE_OF_RUN();
+                                       goto did_consider;
+                               } else {
+                                       npages = 1;
+                                       start_idx = page_idx;
+                                       start_pnum = m->phys_page;
+                               }
+                       } else {
+                               npages++;
+                       }
+                       prevcontaddr = m->phys_page;
+                       
+                       VM_PAGE_CHECK(m);
+                       if (m->free) {
+                               free_considered++;
+                       } else {
+                               /*
+                                * This page is not free.
+                                * If we can't steal used pages,
+                                * we have to give up this run
+                                * and keep looking.
+                                * Otherwise, we might need to
+                                * move the contents of this page
+                                * into a substitute page.
+                                */
+#if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
+                               if (m->pmapped || m->dirty || m->precious) {
+                                       substitute_needed++;
+                               }
+#else
+                               RESET_STATE_OF_RUN();
+#endif
+                       }
+
+                       if ((free_considered + substitute_needed) > free_available) {   
+                               /*
+                                * if we let this run continue
+                                * we will end up dropping the vm_page_free_count
+                                * below the reserve limit... we need to abort
+                                * this run, but we can at least re-consider this
+                                * page... thus the jump back to 'retry'
+                                */
+                               RESET_STATE_OF_RUN();
+
+                               if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
+                                       considered++;
+                                       goto retry;
+                               }
+                               /*
+                                * free_available == 0
+                                * so can't consider any free pages... if
+                                * we went to retry in this case, we'd
+                                * get stuck looking at the same page
+                                * w/o making any forward progress...
+                                * we also want to take this path if we've already
+                                * reached our limit that controls the lock latency
+                                */
+                       }
+               }
+did_consider:
+               if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
+
+                       PAGE_REPLACEMENT_ALLOWED(FALSE);
+
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+                       vm_page_unlock_queues();
+
+                       mutex_pause(0);
+
+                       PAGE_REPLACEMENT_ALLOWED(TRUE);
+
+                       vm_page_lock_queues();
+                       lck_mtx_lock(&vm_page_queue_free_lock);
+
+                       RESET_STATE_OF_RUN();
+                       /*
+                        * reset our free page limit since we
+                        * dropped the lock protecting the vm_page_free_queue
+                        */
+                       free_available = vm_page_free_count - vm_page_free_reserved;
+                       considered = 0;
+
+                       yielded++;
+
+                       goto retry;
+               }
+               considered++;
+       }
+       m = VM_PAGE_NULL;
+
+       if (npages != contig_pages) {
+               if (!wrapped) {
+                       /*
+                        * We didn't find a contiguous range but we didn't
+                        * start from the very first page.
+                        * Start again from the very first page.
+                        */
+                       RESET_STATE_OF_RUN();
+                       if( flags & KMA_LOMEM)
+                               idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = 0;
+                       else
+                               idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
+                       last_idx = 0;
+                       page_idx = last_idx;
+                       wrapped = TRUE;
+                       goto retry;
+               }
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+       } else {
+               vm_page_t       m1;
+               vm_page_t       m2;
+               unsigned int    cur_idx;
+               unsigned int    tmp_start_idx;
+               vm_object_t     locked_object = VM_OBJECT_NULL;
+               boolean_t       abort_run = FALSE;
+               
+               assert(page_idx - start_idx == contig_pages);
+
+               tmp_start_idx = start_idx;
+
+               /*
+                * first pass through to pull the free pages
+                * off of the free queue so that in case we
+                * need substitute pages, we won't grab any 
+                * of the free pages in the run... we clear
+                * the 'free' bit here in this first pass, and even in
+                * an abort_run case, we'll collect all of the
+                * free pages in this run and return them to the free list
+                */
+               while (start_idx < page_idx) {
+
+                       m1 = &vm_pages[start_idx++];
+
+#if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
+                       assert(m1->free);
+#endif
+
+                       if (m1->free) {
+                               unsigned int color;
+
+                               color = m1->phys_page & vm_color_mask;
+#if MACH_ASSERT
+                               vm_page_verify_free_list(&vm_page_queue_free[color], color, m1, TRUE);
+#endif
+                               queue_remove(&vm_page_queue_free[color],
+                                            m1,
+                                            vm_page_t,
+                                            pageq);
+                               m1->pageq.next = NULL;
+                               m1->pageq.prev = NULL;
+#if MACH_ASSERT
+                               vm_page_verify_free_list(&vm_page_queue_free[color], color, VM_PAGE_NULL, FALSE);
+#endif
+                               /*
+                                * Clear the "free" bit so that this page
+                                * does not get considered for another
+                                * concurrent physically-contiguous allocation.
+                                */
+                               m1->free = FALSE; 
+                               assert(m1->busy);
+
+                               vm_page_free_count--;
+                       }
+               }
+               if( flags & KMA_LOMEM)
+                       vm_page_lomem_find_contiguous_last_idx = page_idx;
+               else 
+                       vm_page_find_contiguous_last_idx = page_idx;
+               
+               /*
+                * we can drop the free queue lock at this point since
+                * we've pulled any 'free' candidates off of the list
+                * we need it dropped so that we can do a vm_page_grab
+                * when substituting for pmapped/dirty pages
+                */
+               lck_mtx_unlock(&vm_page_queue_free_lock);
+
+               start_idx = tmp_start_idx;
+               cur_idx = page_idx - 1;
+
+               while (start_idx++ < page_idx) {
+                       /*
+                        * must go through the list from back to front
+                        * so that the page list is created in the
+                        * correct order - low -> high phys addresses
+                        */
+                       m1 = &vm_pages[cur_idx--];
+
+                       assert(!m1->free);
+
+                       if (m1->object == VM_OBJECT_NULL) {
+                               /*
+                                * page has already been removed from
+                                * the free list in the 1st pass
+                                */
+                               assert(m1->offset == (vm_object_offset_t) -1);
+                               assert(m1->busy);
+                               assert(!m1->wanted);
+                               assert(!m1->laundry);
+                       } else {
+                               vm_object_t object;
+                               int refmod;
+                               boolean_t disconnected, reusable;
+
+                               if (abort_run == TRUE)
+                                       continue;
+
+                               object = m1->object;
+
+                               if (object != locked_object) {
+                                       if (locked_object) {
+                                               vm_object_unlock(locked_object);
+                                               locked_object = VM_OBJECT_NULL;
+                                       }
+                                       if (vm_object_lock_try(object))
+                                               locked_object = object;
+                               }
+                               if (locked_object == VM_OBJECT_NULL || 
+                                   (VM_PAGE_WIRED(m1) || m1->gobbled ||
+                                    m1->encrypted_cleaning ||
+                                    m1->pageout_queue || m1->laundry || m1->wanted ||
+                                    m1->cleaning || m1->overwriting || m1->pageout || m1->busy)) {
+
+                                       if (locked_object) {
+                                               vm_object_unlock(locked_object);
+                                               locked_object = VM_OBJECT_NULL;
+                                       }
+                                       tmp_start_idx = cur_idx;
+                                       abort_run = TRUE;
+                                       continue;
+                               }
+
+                               disconnected = FALSE;
+                               reusable = FALSE;
+
+                               if ((m1->reusable ||
+                                    m1->object->all_reusable) &&
+                                   m1->inactive &&
+                                   !m1->dirty &&
+                                   !m1->reference) {
+                                       /* reusable page... */
+                                       refmod = pmap_disconnect(m1->phys_page);
+                                       disconnected = TRUE;
+                                       if (refmod == 0) {
+                                               /*
+                                                * ... not reused: can steal
+                                                * without relocating contents.
+                                                */
+                                               reusable = TRUE;
+                                       }
+                               }
+
+                               if ((m1->pmapped &&
+                                    ! reusable) ||
+                                   m1->dirty ||
+                                   m1->precious) {
+                                       vm_object_offset_t offset;
+
+                                       m2 = vm_page_grab();
+
+                                       if (m2 == VM_PAGE_NULL) {
+                                               if (locked_object) {
+                                                       vm_object_unlock(locked_object);
+                                                       locked_object = VM_OBJECT_NULL;
+                                               }
+                                               tmp_start_idx = cur_idx;
+                                               abort_run = TRUE;
+                                               continue;
+                                       }
+                                       if (! disconnected) {
+                                               if (m1->pmapped)
+                                                       refmod = pmap_disconnect(m1->phys_page);
+                                               else
+                                                       refmod = 0;
+                                       }
+
+                                       /* copy the page's contents */
+                                       pmap_copy_page(m1->phys_page, m2->phys_page);
+                                       /* copy the page's state */
+                                       assert(!VM_PAGE_WIRED(m1));
+                                       assert(!m1->free);
+                                       assert(!m1->pageout_queue);
+                                       assert(!m1->laundry);
+                                       m2->reference   = m1->reference;
+                                       assert(!m1->gobbled);
+                                       assert(!m1->private);
+                                       m2->no_cache    = m1->no_cache;
+                                       m2->xpmapped    = 0;
+                                       assert(!m1->busy);
+                                       assert(!m1->wanted);
+                                       assert(!m1->fictitious);
+                                       m2->pmapped     = m1->pmapped; /* should flush cache ? */
+                                       m2->wpmapped    = m1->wpmapped;
+                                       assert(!m1->pageout);
+                                       m2->absent      = m1->absent;
+                                       m2->error       = m1->error;
+                                       m2->dirty       = m1->dirty;
+                                       assert(!m1->cleaning);
+                                       m2->precious    = m1->precious;
+                                       m2->clustered   = m1->clustered;
+                                       assert(!m1->overwriting);
+                                       m2->restart     = m1->restart;
+                                       m2->unusual     = m1->unusual;
+                                       m2->encrypted   = m1->encrypted;
+                                       assert(!m1->encrypted_cleaning);
+                                       m2->cs_validated = m1->cs_validated;
+                                       m2->cs_tainted  = m1->cs_tainted;
+                                       m2->cs_nx       = m1->cs_nx;
+
+                                       /*
+                                        * If m1 had really been reusable,
+                                        * we would have just stolen it, so
+                                        * let's not propagate its "reusable"
+                                        * bit and assert that m2 is not
+                                        * marked as "reusable".
+                                        */
+                                       // m2->reusable = m1->reusable;
+                                       assert(!m2->reusable);
+
+                                       assert(!m1->lopage);
+                                       m2->slid        = m1->slid;
+                                       m2->compressor  = m1->compressor;
+
+                                       /*
+                                        * page may need to be flushed if
+                                        * it is marshalled into a UPL
+                                        * that is going to be used by a device
+                                        * that doesn't support coherency
+                                        */
+                                       m2->written_by_kernel = TRUE;
+
+                                       /*
+                                        * make sure we clear the ref/mod state
+                                        * from the pmap layer... else we risk
+                                        * inheriting state from the last time
+                                        * this page was used...
+                                        */
+                                       pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+
+                                       if (refmod & VM_MEM_REFERENCED)
+                                               m2->reference = TRUE;
+                                       if (refmod & VM_MEM_MODIFIED) {
+                                               SET_PAGE_DIRTY(m2, TRUE);
+                                       }
+                                       offset = m1->offset;
+
+                                       /*
+                                        * completely cleans up the state
+                                        * of the page so that it is ready
+                                        * to be put onto the free list, or
+                                        * for this purpose it looks like it
+                                        * just came off of the free list
+                                        */
+                                       vm_page_free_prepare(m1);
+
+                                       /*
+                                        * now put the substitute page
+                                        * on the object
+                                        */
+                                       vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
+
+                                       if (m2->compressor) {
+                                               m2->pmapped = TRUE;
+                                               m2->wpmapped = TRUE;
+
+                                               PMAP_ENTER(kernel_pmap, m2->offset, m2,
+                                                          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
+
+                                               compressed_pages++;
+
+                                       } else {
+                                               if (m2->reference)
+                                                       vm_page_activate(m2);
+                                               else
+                                                       vm_page_deactivate(m2);
+                                       }
+                                       PAGE_WAKEUP_DONE(m2);
+
+                               } else {
+                                       assert(!m1->compressor);
+
+                                       /*
+                                        * completely cleans up the state
+                                        * of the page so that it is ready
+                                        * to be put onto the free list, or
+                                        * for this purpose it looks like it
+                                        * just came off of the free list
+                                        */
+                                       vm_page_free_prepare(m1);
+                               }
+
+                               stolen_pages++;
+
+                       }
+                       m1->pageq.next = (queue_entry_t) m;
+                       m1->pageq.prev = NULL;
+                       m = m1;
+               }
+               if (locked_object) {
+                       vm_object_unlock(locked_object);
+                       locked_object = VM_OBJECT_NULL;
+               }
+
+               if (abort_run == TRUE) {
+                       if (m != VM_PAGE_NULL) {
+                               vm_page_free_list(m, FALSE);
+                       }
+
+                       dumped_run++;
+
+                       /*
+                        * want the index of the last
+                        * page in this run that was
+                        * successfully 'stolen', so back
+                        * it up 1 for the auto-decrement on use
+                        * and 1 more to bump back over this page
+                        */
+                       page_idx = tmp_start_idx + 2;
+                       if (page_idx >= vm_pages_count) {
+                               if (wrapped)
+                                       goto done_scanning;
+                               page_idx = last_idx = 0;
+                               wrapped = TRUE;
+                       }
+                       abort_run = FALSE;
+               
+                       /*
+                        * The pages from the aborted run have been returned
+                        * to the free list; reset the run state and resume
+                        * the scan just past the point where the run was
+                        * aborted.
+                        */
+                       RESET_STATE_OF_RUN();
+                       
+                       if( flags & KMA_LOMEM)
+                               idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = page_idx;
+                       else
+                               idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
+                       
+                       last_idx = page_idx;
+                       
+                       lck_mtx_lock(&vm_page_queue_free_lock);
+                       /*
+                        * reset our free page limit since we
+                        * dropped the lock protecting the vm_page_free_queue
+                        */
+                       free_available = vm_page_free_count - vm_page_free_reserved;
+                       goto retry;
+               }
+
+               for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
+
+                       if (wire == TRUE)
+                               m1->wire_count++;
+                       else
+                               m1->gobbled = TRUE;
+               }
+               if (wire == FALSE)
+                       vm_page_gobble_count += npages;
+
+               /*
+                * gobbled pages are also counted as wired pages
+                */
+               vm_page_wire_count += npages;
+
+               assert(vm_page_verify_contiguous(m, npages));
+       }
+done_scanning:
+       PAGE_REPLACEMENT_ALLOWED(FALSE);
+
+       vm_page_unlock_queues();
+
+#if DEBUG
+       clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
+
+       tv_end_sec -= tv_start_sec;
+       if (tv_end_usec < tv_start_usec) {
+               tv_end_sec--;
+               tv_end_usec += 1000000;
+       }
+       tv_end_usec -= tv_start_usec;
+       if (tv_end_usec >= 1000000) {
+               tv_end_sec++;
+               tv_end_usec -= 1000000;
+       }
+       if (vm_page_find_contig_debug) {
+               printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
+                      __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+                      (long)tv_end_sec, tv_end_usec, orig_last_idx,
+                      scanned, yielded, dumped_run, stolen_pages, compressed_pages);
+       }
+
+#endif
+#if MACH_ASSERT
+       vm_page_verify_free_lists();
+#endif
+       if (m == NULL && zone_gc_called == FALSE) {
+               printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
+                      __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+                      scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
+
+               if (consider_buffer_cache_collect != NULL) {
+                       (void)(*consider_buffer_cache_collect)(1);
+               }
+
+               consider_zone_gc(TRUE);
+
+               zone_gc_called = TRUE;
+
+               printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
+               goto full_scan_again;
+       }
+
+       return m;
+}
+
+/*
+ *     Allocate a list of contiguous, wired pages.
+ */
+kern_return_t
+cpm_allocate(
+       vm_size_t       size,
+       vm_page_t       *list,
+       ppnum_t         max_pnum,
+       ppnum_t         pnum_mask,
+       boolean_t       wire,
+       int             flags)
+{
+       vm_page_t               pages;
+       unsigned int            npages;
+
+       if (size % PAGE_SIZE != 0)
+               return KERN_INVALID_ARGUMENT;
+
+       npages = (unsigned int) (size / PAGE_SIZE);
+       if (npages != size / PAGE_SIZE) {
+               /* 32-bit overflow */
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       /*
+        *      Obtain a pointer to a subset of the free
+        *      list large enough to satisfy the request;
+        *      the region will be physically contiguous.
+        */
+       pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
+
+       if (pages == VM_PAGE_NULL)
+               return KERN_NO_SPACE;
+       /*
+        * determine need for wakeups
+        */
+       if ((vm_page_free_count < vm_page_free_min) ||
+            ((vm_page_free_count < vm_page_free_target) &&
+             ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+                thread_wakeup((event_t) &vm_page_free_wanted);
+               
+       VM_CHECK_MEMORYSTATUS;
+       
+       /*
+        *      The CPM pages should now be available and
+        *      ordered by ascending physical address.
+        */
+       assert(vm_page_verify_contiguous(pages, npages));
+
+       *list = pages;
+       return KERN_SUCCESS;
+}
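+
+/*
+ * Illustrative usage sketch (not part of the original change): a caller that
+ * needs 16 wired, physically contiguous pages with no physical-address limit
+ * and no alignment constraint might do something like the following; the
+ * error handling shown is an assumption, not taken from an actual caller.
+ *
+ *     vm_page_t       pages;
+ *     kern_return_t   kr;
+ *
+ *     kr = cpm_allocate(16 * PAGE_SIZE, &pages, 0, 0, TRUE, 0);
+ *     if (kr != KERN_SUCCESS)
+ *             return kr;
+ *     // 'pages' is now a list of wired vm_page_t's, physically contiguous
+ *     // and ordered by ascending physical address.
+ */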
+
+
+unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
+
+/*
+ * when working on a 'run' of pages, it is necessary to hold 
+ * the vm_page_queue_lock (a hot global lock) for certain operations
+ * on the page... however, the majority of the work can be done
+ * while merely holding the object lock... in fact there are certain
+ * collections of pages that don't require any work brokered by the
+ * vm_page_queue_lock... to mitigate the time spent behind the global
+ * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
+ * while doing all of the work that doesn't require the vm_page_queue_lock...
+ * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
+ * necessary work for each page... we will grab the busy bit on the page
+ * if it's not already held so that vm_page_do_delayed_work can drop the object lock
+ * if it can't immediately take the vm_page_queue_lock in order to compete
+ * for the locks in the same order that vm_pageout_scan takes them.
+ * the operation names are modeled after the names of the routines that
+ * need to be called in order to make the changes very obvious in the
+ * original loop
+ */
+
+void
+vm_page_do_delayed_work(
+       vm_object_t     object,
+       vm_tag_t        tag,
+       struct vm_page_delayed_work *dwp,
+       int             dw_count)
+{
+       int             j;
+       vm_page_t       m;
+        vm_page_t       local_free_q = VM_PAGE_NULL;
+
+       /*
+        * pageout_scan takes the vm_page_lock_queues first
+        * then tries for the object lock... to avoid what
+        * is effectively a lock inversion, we'll go to the
+        * trouble of taking them in that same order... otherwise
+        * if this object contains the majority of the pages resident
+        * in the UBC (or a small set of large objects actively being
+        * worked on contain the majority of the pages), we could
+        * cause the pageout_scan thread to 'starve' in its attempt
+        * to find pages to move to the free queue, since it has to
+        * successfully acquire the object lock of any candidate page
+        * before it can steal/clean it.
+        */
+       if (!vm_page_trylockspin_queues()) {
+               vm_object_unlock(object);
+
+               vm_page_lockspin_queues();
+
+               for (j = 0; ; j++) {
+                       if (!vm_object_lock_avoid(object) &&
+                           _vm_object_lock_try(object))
+                               break;
+                       vm_page_unlock_queues();
+                       mutex_pause(j);
+                       vm_page_lockspin_queues();
+               }
+       }
+       for (j = 0; j < dw_count; j++, dwp++) {
+
+               m = dwp->dw_m;
+
+               if (dwp->dw_mask & DW_vm_pageout_throttle_up)
+                       vm_pageout_throttle_up(m);
+#if CONFIG_PHANTOM_CACHE
+               if (dwp->dw_mask & DW_vm_phantom_cache_update)
+                       vm_phantom_cache_update(m);
+#endif
+               if (dwp->dw_mask & DW_vm_page_wire)
+                       vm_page_wire(m, tag, FALSE);
+               else if (dwp->dw_mask & DW_vm_page_unwire) {
+                       boolean_t       queueit;
+
+                       queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
+
+                       vm_page_unwire(m, queueit);
+               }
+               if (dwp->dw_mask & DW_vm_page_free) {
+                       vm_page_free_prepare_queues(m);
+
+                       assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+                       /*
+                        * Add this page to our list of reclaimed pages,
+                        * to be freed later.
+                        */
+                       m->pageq.next = (queue_entry_t) local_free_q;
+                       local_free_q = m;
+               } else {
+                       if (dwp->dw_mask & DW_vm_page_deactivate_internal)
+                               vm_page_deactivate_internal(m, FALSE);
+                       else if (dwp->dw_mask & DW_vm_page_activate) {
+                               if (m->active == FALSE) {
+                                       vm_page_activate(m);
+                               }
+                       }
+                       else if (dwp->dw_mask & DW_vm_page_speculate)
+                               vm_page_speculate(m, TRUE);
+                       else if (dwp->dw_mask & DW_enqueue_cleaned) {
+                               /*
+                                * if we didn't hold the object lock and did this,
+                                * we might disconnect the page, then someone might
+                                * soft fault it back in, then we would put it on the
+                                * cleaned queue, and so we would have a referenced (maybe even dirty)
+                                * page on that queue, which we don't want
+                                */
+                               int refmod_state = pmap_disconnect(m->phys_page);
+
+                               if ((refmod_state & VM_MEM_REFERENCED)) {
+                                       /*
+                                        * this page has been touched since it got cleaned; let's activate it
+                                        * if it hasn't already been
+                                        */
+                                       vm_pageout_enqueued_cleaned++;
+                                       vm_pageout_cleaned_reactivated++;
+                                       vm_pageout_cleaned_commit_reactivated++;
+
+                                       if (m->active == FALSE)
+                                               vm_page_activate(m);
+                               } else {
+                                       m->reference = FALSE;
+                                       vm_page_enqueue_cleaned(m);
+                               }
+                       }
+                       else if (dwp->dw_mask & DW_vm_page_lru)
+                               vm_page_lru(m);
+                       else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
+                               if ( !m->pageout_queue)
+                                       vm_page_queues_remove(m);
+                       }
+                       if (dwp->dw_mask & DW_set_reference)
+                               m->reference = TRUE;
+                       else if (dwp->dw_mask & DW_clear_reference)
+                               m->reference = FALSE;
+
+                       if (dwp->dw_mask & DW_move_page) {
+                               if ( !m->pageout_queue) {
+                                       vm_page_queues_remove(m);
+
+                                       assert(m->object != kernel_object);
+
+                                       vm_page_enqueue_inactive(m, FALSE);
+                               }
+                       }
+                       if (dwp->dw_mask & DW_clear_busy)
+                               m->busy = FALSE;
+
+                       if (dwp->dw_mask & DW_PAGE_WAKEUP)
+                               PAGE_WAKEUP(m);
+               }
+       }
+       vm_page_unlock_queues();
+
+       if (local_free_q)
+               vm_page_free_list(local_free_q, TRUE);
+       
+       VM_CHECK_MEMORYSTATUS;
+
+}
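+
+/*
+ * Illustrative caller pattern (a sketch, not from this change): callers batch
+ * per-page work while holding only the object lock, then pay the
+ * vm_page_queue_lock cost once per batch via vm_page_do_delayed_work().
+ * The array sizing and the particular dw_mask bits used below are assumptions
+ * made for the sake of the example.
+ *
+ *     struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+ *     struct vm_page_delayed_work     *dwp = &dw_array[0];
+ *     int                             dw_count = 0;
+ *
+ *     for each page 'm' of interest in 'object' (object lock held) {
+ *             ... work that needs only the object lock ...
+ *             dwp->dw_m = m;
+ *             dwp->dw_mask = DW_vm_page_deactivate_internal | DW_clear_busy | DW_PAGE_WAKEUP;
+ *             dwp++, dw_count++;
+ *             if (dw_count >= vm_max_delayed_work_limit) {
+ *                     vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
+ *                     dwp = &dw_array[0];
+ *                     dw_count = 0;
+ *             }
+ *     }
+ *     if (dw_count)
+ *             vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
+ */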
+
+kern_return_t
+vm_page_alloc_list(
+       int     page_count,
+       int     flags,
+       vm_page_t *list)
+{
+       vm_page_t       lo_page_list = VM_PAGE_NULL;
+       vm_page_t       mem;
+       int             i;
+
+       if ( !(flags & KMA_LOMEM))
+               panic("vm_page_alloc_list: called w/o KMA_LOMEM");
+
+       for (i = 0; i < page_count; i++) {
+
+               mem = vm_page_grablo();
+
+               if (mem == VM_PAGE_NULL) {
+                       if (lo_page_list)
+                               vm_page_free_list(lo_page_list, FALSE);
+
+                       *list = VM_PAGE_NULL;
+
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
+               mem->pageq.next = (queue_entry_t) lo_page_list;
+               lo_page_list = mem;
+       }
+       *list = lo_page_list;
+
+       return (KERN_SUCCESS);
+}
+
+void
+vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
+{
+       page->offset = offset;
+}
+
+vm_page_t
+vm_page_get_next(vm_page_t page)
+{
+       return ((vm_page_t) page->pageq.next);
+}
+
+vm_object_offset_t
+vm_page_get_offset(vm_page_t page)
+{
+       return (page->offset);
+}
+
+ppnum_t
+vm_page_get_phys_page(vm_page_t page)
+{
+       return (page->phys_page);
+}
+       
+       
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#if HIBERNATION
+
+static vm_page_t hibernate_gobble_queue;
+
+static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
+static int  hibernate_flush_dirty_pages(int);
+static int  hibernate_flush_queue(queue_head_t *, int);
+
+void hibernate_flush_wait(void);
+void hibernate_mark_in_progress(void);
+void hibernate_clear_in_progress(void);
+
+void           hibernate_free_range(int, int);
+void           hibernate_hash_insert_page(vm_page_t);
+uint32_t       hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
+void           hibernate_rebuild_vm_structs(void);
+uint32_t       hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
+ppnum_t                hibernate_lookup_paddr(unsigned int);
+
+struct hibernate_statistics {
+       int hibernate_considered;
+       int hibernate_reentered_on_q;
+       int hibernate_found_dirty;
+       int hibernate_skipped_cleaning;
+       int hibernate_skipped_transient;
+       int hibernate_skipped_precious;
+       int hibernate_skipped_external;
+       int hibernate_queue_nolock;
+       int hibernate_queue_paused;
+       int hibernate_throttled;
+       int hibernate_throttle_timeout;
+       int hibernate_drained;
+       int hibernate_drain_timeout;
+       int cd_lock_failed;
+       int cd_found_precious;
+       int cd_found_wired;
+       int cd_found_busy;
+       int cd_found_unusual;
+       int cd_found_cleaning;
+       int cd_found_laundry;
+       int cd_found_dirty;
+       int cd_found_xpmapped;
+       int cd_skipped_xpmapped;
+       int cd_local_free;
+       int cd_total_free;
+       int cd_vm_page_wire_count;
+       int cd_vm_struct_pages_unneeded;
+       int cd_pages;
+       int cd_discarded;
+       int cd_count_wire;
+} hibernate_stats;
+
+
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define        HIBERNATE_XPMAPPED_LIMIT        40000
+
+
+static int
+hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
+{
+       wait_result_t   wait_result;
+
+       vm_page_lock_queues();
+
+       while ( !queue_empty(&q->pgo_pending) ) {
+
+               q->pgo_draining = TRUE;
+
+               assert_wait_timeout((event_t) (&q->pgo_laundry+1), THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC);
+
+               vm_page_unlock_queues();
+
+               wait_result = thread_block(THREAD_CONTINUE_NULL);
+
+               if (wait_result == THREAD_TIMED_OUT && !queue_empty(&q->pgo_pending)) {
+                       hibernate_stats.hibernate_drain_timeout++;
+                       
+                       if (q == &vm_pageout_queue_external)
+                               return (0);
+                       
+                       return (1);
+               }
+               vm_page_lock_queues();
+
+               hibernate_stats.hibernate_drained++;
+       }
+       vm_page_unlock_queues();
+
+       return (0);
+}
+
+
+boolean_t hibernate_skip_external = FALSE;
+
+static int
+hibernate_flush_queue(queue_head_t *q, int qcount)
+{
+       vm_page_t       m;
+       vm_object_t     l_object = NULL;
+       vm_object_t     m_object = NULL;
+       int             refmod_state = 0;
+       int             try_failed_count = 0;
+       int             retval = 0;
+       int             current_run = 0;
+       struct  vm_pageout_queue *iq;
+       struct  vm_pageout_queue *eq;
+       struct  vm_pageout_queue *tq;
+
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, q, qcount, 0, 0, 0);
+       
+       iq = &vm_pageout_queue_internal;
+       eq = &vm_pageout_queue_external;
+
+       vm_page_lock_queues();
+
+       while (qcount && !queue_empty(q)) {
+
+               if (current_run++ == 1000) {
+                       if (hibernate_should_abort()) {
+                               retval = 1;
+                               break;
+                       }
+                       current_run = 0;
+               }
+
+               m = (vm_page_t) queue_first(q);
+               m_object = m->object;
+
+               /*
+                * check to see if we currently are working
+                * with the same object... if so, we've
+                * already got the lock
+                */
+               if (m_object != l_object) {
+                       /*
+                        * the object associated with the candidate page is
+                        * different from the one we were just working
+                        * with... dump the lock if we still own it
+                        */
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
+                       }
+                       /*
+                        * Try to lock the object; since we've already got the
+                        * page queues lock, we can only 'try' for this one.
+                        * if the 'try' fails, we need to do a mutex_pause
+                        * to allow the owner of the object lock a chance to
+                        * run... 
+                        */
+                       if ( !vm_object_lock_try_scan(m_object)) {
+
+                               if (try_failed_count > 20) {
+                                       hibernate_stats.hibernate_queue_nolock++;
+
+                                       goto reenter_pg_on_q;
+                               }
+
+                               vm_page_unlock_queues();
+                               mutex_pause(try_failed_count++);
+                               vm_page_lock_queues();
+
+                               hibernate_stats.hibernate_queue_paused++;
+                               continue;
+                       } else {
+                               l_object = m_object;
+                       }
+               }
+               if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
+                       /*
+                        * page is not to be cleaned
+                        * put it back on the head of its queue
+                        */
+                       if (m->cleaning)
+                               hibernate_stats.hibernate_skipped_cleaning++;
+                       else
+                               hibernate_stats.hibernate_skipped_transient++;
+
+                       goto reenter_pg_on_q;
+               }
+               if (m_object->copy == VM_OBJECT_NULL) {
+                       if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
+                               /*
+                                * let the normal hibernate image path
+                                * deal with these
+                                */
+                               goto reenter_pg_on_q;
+                       }
+               }
+               if ( !m->dirty && m->pmapped) {
+                       refmod_state = pmap_get_refmod(m->phys_page);
+
+                       if ((refmod_state & VM_MEM_MODIFIED)) {
+                               SET_PAGE_DIRTY(m, FALSE);
+                       }
+               } else
+                       refmod_state = 0;
+
+               if ( !m->dirty) {
+                       /*
+                        * page is not to be cleaned
+                        * put it back on the head of its queue
+                        */
+                       if (m->precious)
+                               hibernate_stats.hibernate_skipped_precious++;
+
+                       goto reenter_pg_on_q;
+               }
+
+               if (hibernate_skip_external == TRUE && !m_object->internal) {
+
+                       hibernate_stats.hibernate_skipped_external++;
+                       
+                       goto reenter_pg_on_q;
+               }
+               tq = NULL;
+
+               if (m_object->internal) {
+                       if (VM_PAGE_Q_THROTTLED(iq))
+                               tq = iq;
+               } else if (VM_PAGE_Q_THROTTLED(eq))
+                       tq = eq;
+
+               if (tq != NULL) {
+                       wait_result_t   wait_result;
+                       int             wait_count = 5;
+
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
+                       }
+
+                       while (retval == 0) {
+
+                               tq->pgo_throttled = TRUE;
+
+                               assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
+
+                               vm_page_unlock_queues();
+
+                               wait_result = thread_block(THREAD_CONTINUE_NULL);
+
+                               vm_page_lock_queues();
+
+                               if (wait_result != THREAD_TIMED_OUT)
+                                       break;
+                                if (!VM_PAGE_Q_THROTTLED(tq))
+                                        break;
+
+                               if (hibernate_should_abort())
+                                       retval = 1;
+
+                               if (--wait_count == 0) {
+
+                                       hibernate_stats.hibernate_throttle_timeout++;
+
+                                       if (tq == eq) {
+                                               hibernate_skip_external = TRUE;
+                                               break;
+                                       }
+                                       retval = 1;
+                               }
+                       }
+                       if (retval)
+                               break;
+
+                       hibernate_stats.hibernate_throttled++;
+
+                       continue;
+               }
+               /*
+                * we've already factored out pages in the laundry which
+                * means this page can't be on the pageout queue so it's
+                * safe to do the vm_page_queues_remove
+                */
+                assert(!m->pageout_queue);
+
+               vm_page_queues_remove(m);
+
+               if (COMPRESSED_PAGER_IS_ACTIVE && m_object->internal == TRUE)
+                       pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
+
+               (void)vm_pageout_cluster(m, FALSE, FALSE, FALSE);
+
+               hibernate_stats.hibernate_found_dirty++;
+
+               goto next_pg;
+
+reenter_pg_on_q:
+               queue_remove(q, m, vm_page_t, pageq);
+               queue_enter(q, m, vm_page_t, pageq);
+
+               hibernate_stats.hibernate_reentered_on_q++;
+next_pg:
+               hibernate_stats.hibernate_considered++;
+
+               qcount--;
+               try_failed_count = 0;
+       }
+       if (l_object != NULL) {
+               vm_object_unlock(l_object);
+               l_object = NULL;
+       }
+
+       vm_page_unlock_queues();
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
+
+       return (retval);
+}
+
+
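+/*
+ * Push dirty pageable pages out to their pagers (or the compressor) in
+ * preparation for writing the hibernation image.  Local queues are first
+ * drained onto the global active queue, then each page queue is flushed
+ * via hibernate_flush_queue() and the internal pageout queue is drained.
+ * Returns non-zero if the flush was aborted.
+ */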
+static int
+hibernate_flush_dirty_pages(int pass)
+{
+       struct vm_speculative_age_q     *aq;
+       uint32_t        i;
+
+       if (vm_page_local_q) {
+               for (i = 0; i < vm_page_local_q_count; i++)
+                       vm_page_reactivate_local(i, TRUE, FALSE);
+       }
+
+       for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
+               int             qcount;
+               vm_page_t       m;
+
+               aq = &vm_page_queue_speculative[i];
+
+               if (queue_empty(&aq->age_q))
+                       continue;
+               qcount = 0;
+
+               vm_page_lockspin_queues();
+
+               queue_iterate(&aq->age_q,
+                             m,
+                             vm_page_t,
+                             pageq)
+               {
+                       qcount++;
+               }
+               vm_page_unlock_queues();
+
+               if (qcount) {
+                       if (hibernate_flush_queue(&aq->age_q, qcount))
+                               return (1);
+               }
+       }
+       if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count))
+               return (1);
+       if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count))
+               return (1);
+       if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count))
+               return (1);
+       if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal))
+               return (1);
+
+       if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+               vm_compressor_record_warmup_start();
+
+       if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
+               if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+                       vm_compressor_record_warmup_end();
+               return (1);
+       }
+       if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
+               if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+                       vm_compressor_record_warmup_end();
+               return (1);
+       }
+       if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+               vm_compressor_record_warmup_end();
+
+       if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external))
+               return (1);
+
+       return (0);
+}
+
+
+void
+hibernate_reset_stats()
+{
+       bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
+
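+/*
+ * Top-level memory flush performed before hibernation: flush the dirty
+ * pageable pages (pass 1), then flush the compressor and ask the buffer
+ * cache and the zone allocator to release wired memory.  Returns non-zero
+ * if the flush could not be completed.
+ */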
+int
+hibernate_flush_memory()
+{
+       int     retval;
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
+
+       hibernate_cleaning_in_progress = TRUE;
+       hibernate_skip_external = FALSE;
+
+       if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
+
+               if (COMPRESSED_PAGER_IS_ACTIVE) {
+
+                               KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+
+                               vm_compressor_flush();
+
+                               KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+               }
+               if (consider_buffer_cache_collect != NULL) {
+                       unsigned int orig_wire_count;
+
+                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+                       orig_wire_count = vm_page_wire_count;
+                       
+                       (void)(*consider_buffer_cache_collect)(1);
+                       consider_zone_gc(TRUE);
+
+                       HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
+
+                       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
+               }
+       }
+       hibernate_cleaning_in_progress = FALSE;
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
+
+       if (retval && COMPRESSED_PAGER_IS_ACTIVE)
+               HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
+
+
+    HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
+                hibernate_stats.hibernate_considered,
+                hibernate_stats.hibernate_reentered_on_q,
+                hibernate_stats.hibernate_found_dirty);
+    HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
+                hibernate_stats.hibernate_skipped_cleaning,
+                hibernate_stats.hibernate_skipped_transient,
+                hibernate_stats.hibernate_skipped_precious,
+                hibernate_stats.hibernate_skipped_external,
+                hibernate_stats.hibernate_queue_nolock);
+    HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
+                hibernate_stats.hibernate_queue_paused,
+                hibernate_stats.hibernate_throttled,
+                hibernate_stats.hibernate_throttle_timeout,
+                hibernate_stats.hibernate_drained,
+                hibernate_stats.hibernate_drain_timeout);
+
+       return (retval);
+}
+
+
+static void
+hibernate_page_list_zero(hibernate_page_list_t *list)
+{
+    uint32_t             bank;
+    hibernate_bitmap_t * bitmap;
+
+    bitmap = &list->bank_bitmap[0];
+    for (bank = 0; bank < list->bank_count; bank++)
+    {
+        uint32_t last_bit;
+
+       bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); 
+        // set out-of-bound bits at end of bitmap.
+        last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
+       if (last_bit)
+           bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
+
+       bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
+    }
+}
+
+void
+hibernate_free_gobble_pages(void)
+{
+    vm_page_t m, next;
+    uint32_t  count = 0;
+
+    m = (vm_page_t) hibernate_gobble_queue;
+    while(m)
+    {
+        next = (vm_page_t) m->pageq.next;
+        vm_page_free(m);
+        count++;
+        m = next;
+    }
+    hibernate_gobble_queue = VM_PAGE_NULL;
+    
+    if (count)
+        HIBLOG("Freed %d pages\n", count);
+}
+
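+/*
+ * Decide whether a page can be discarded at wakeup instead of being written
+ * into the image: clean pages (after syncing the pmap referenced/modified
+ * state) and pages of volatile or empty purgeable objects qualify; wired,
+ * precious, busy, cleaning, laundry and otherwise unusual pages do not.
+ * Up to HIBERNATE_XPMAPPED_LIMIT clean, referenced, executable (xpmapped)
+ * file-backed pages are retained rather than discarded.
+ */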
+static boolean_t 
+hibernate_consider_discard(vm_page_t m, boolean_t preflight)
+{
+    vm_object_t object = NULL;
+    int                  refmod_state;
+    boolean_t            discard = FALSE;
+
+    do
+    {
+        if (m->private)
+            panic("hibernate_consider_discard: private");
+
+        if (!vm_object_lock_try(m->object)) {
+           if (!preflight) hibernate_stats.cd_lock_failed++;
+            break;
+       }
+        object = m->object;
+
+       if (VM_PAGE_WIRED(m)) {
+           if (!preflight) hibernate_stats.cd_found_wired++;
+            break;
+       }
+        if (m->precious) {
+           if (!preflight) hibernate_stats.cd_found_precious++;
+            break;
+       }
+        if (m->busy || !object->alive) {
+           /*
+            *  Somebody is playing with this page.
+            */
+           if (!preflight) hibernate_stats.cd_found_busy++;
+            break;
+       }
+        if (m->absent || m->unusual || m->error) {
+           /*
+            * If it's unusual in any way, ignore it
+            */
+           if (!preflight) hibernate_stats.cd_found_unusual++;
+            break;
+       }
+        if (m->cleaning) {
+           if (!preflight) hibernate_stats.cd_found_cleaning++;
+            break;
+       }
+       if (m->laundry) {
+           if (!preflight) hibernate_stats.cd_found_laundry++;
+            break;
+       }
+        if (!m->dirty)
+        {
+            refmod_state = pmap_get_refmod(m->phys_page);
+        
+            if (refmod_state & VM_MEM_REFERENCED)
+                m->reference = TRUE;
+            if (refmod_state & VM_MEM_MODIFIED) {
+               SET_PAGE_DIRTY(m, FALSE);
+           }
+        }
+   
+        /*
+         * If it's clean or purgeable we can discard the page on wakeup.
+         */
+        discard = (!m->dirty) 
+                   || (VM_PURGABLE_VOLATILE == object->purgable)
+                   || (VM_PURGABLE_EMPTY    == object->purgable);
+
+
+        if (discard == FALSE) {
+               if (!preflight)
+                       hibernate_stats.cd_found_dirty++;
+        } else if (m->xpmapped && m->reference && !object->internal) {
+               if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+                       if (!preflight)
+                               hibernate_stats.cd_found_xpmapped++;
+                       discard = FALSE;
+               } else {
+                       if (!preflight)
+                               hibernate_stats.cd_skipped_xpmapped++;
+               }
+        }
+    }
+    while (FALSE);
+
+    if (object)
+        vm_object_unlock(object);
+
+    return (discard);
+}
+
+
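+/*
+ * Free a page that hibernate_consider_discard() said could be dropped.
+ * For a page of a volatile purgeable object the object is effectively
+ * purged: it is taken off its purgeable queue, its token is deleted if
+ * it was purgeable_when_ripe, it is marked VM_PURGABLE_EMPTY and
+ * vm_page_purgeable_count is reduced by its non-wired resident pages.
+ */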
+static void
+hibernate_discard_page(vm_page_t m)
+{
+    if (m->absent || m->unusual || m->error)
+       /*
+        * If it's unusual in any way, ignore it
+        */
+        return;
+
+#if MACH_ASSERT || DEBUG
+    vm_object_t object = m->object;
+    if (!vm_object_lock_try(m->object))
+       panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
+#else
+    /* No need to lock page queue for token delete, hibernate_vm_unlock() 
+       makes sure these locks are uncontended before sleep */
+#endif /* MACH_ASSERT || DEBUG */
+
+    if (m->pmapped == TRUE) 
+    {
+        __unused int refmod_state = pmap_disconnect(m->phys_page);
+    }
+
+    if (m->laundry)
+        panic("hibernate_discard_page(%p) laundry", m);
+    if (m->private)
+        panic("hibernate_discard_page(%p) private", m);
+    if (m->fictitious)
+        panic("hibernate_discard_page(%p) fictitious", m);
+
+    if (VM_PURGABLE_VOLATILE == m->object->purgable)
+    {
+       /* object should be on a queue */
+        assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL));
+        purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
+        assert(old_queue);
+       if (m->object->purgeable_when_ripe) {
+               vm_purgeable_token_delete_first(old_queue);
+       }
+        m->object->purgable = VM_PURGABLE_EMPTY;
+
+       /*
+        * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
+        * accounted in the "volatile" ledger, so no change here.
+        * We have to update vm_page_purgeable_count, though, since we're
+        * effectively purging this object.
+        */
+       unsigned int delta;
+       assert(m->object->resident_page_count >= m->object->wired_page_count);
+       delta = (m->object->resident_page_count - m->object->wired_page_count);
+       assert(vm_page_purgeable_count >= delta);
+       assert(delta > 0);
+       OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
+    }
+       
+    vm_page_free(m);
+
+#if MACH_ASSERT || DEBUG
+    vm_object_unlock(object);
+#endif /* MACH_ASSERT || DEBUG */
+}
+
+/*
+ Grab locks for hibernate_page_list_setall()
+*/
+void
+hibernate_vm_lock_queues(void)
+{
+    vm_object_lock(compressor_object);
+    vm_page_lock_queues();
+    lck_mtx_lock(&vm_page_queue_free_lock);
+
+    if (vm_page_local_q) {
+       uint32_t  i;
+       for (i = 0; i < vm_page_local_q_count; i++) {
+           struct vpl  *lq;
+           lq = &vm_page_local_q[i].vpl_un.vpl;
+           VPL_LOCK(&lq->vpl_lock);
+       }
+    }
+}
+
+void
+hibernate_vm_unlock_queues(void)
+{
+    if (vm_page_local_q) {
+       uint32_t  i;
+       for (i = 0; i < vm_page_local_q_count; i++) {
+           struct vpl  *lq;
+           lq = &vm_page_local_q[i].vpl_un.vpl;
+           VPL_UNLOCK(&lq->vpl_lock);
+       }
+    }
+    lck_mtx_unlock(&vm_page_queue_free_lock);
+    vm_page_unlock_queues();
+    vm_object_unlock(compressor_object);
+}
+
+/*
+ A zero bit in the bitmaps means the page needs to be saved. All pages default to
+ being saved; pages known to the VM not to need saving are subtracted.
+ Wired pages to be saved are present in page_list_wired, pageable in page_list.
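+ When 'preflight' is TRUE only the counts are computed: the bitmaps are not
+ modified and no pages are discarded.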
+*/
+
+void
+hibernate_page_list_setall(hibernate_page_list_t * page_list,
+                          hibernate_page_list_t * page_list_wired,
+                          hibernate_page_list_t * page_list_pal,
+                          boolean_t preflight, 
+                          boolean_t will_discard,
+                          uint32_t * pagesOut)
+{
+    uint64_t start, end, nsec;
+    vm_page_t m;
+    vm_page_t next;
+    uint32_t pages = page_list->page_count;
+    uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
+    uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
+    uint32_t count_wire = pages;
+    uint32_t count_discard_active    = 0;
+    uint32_t count_discard_inactive  = 0;
+    uint32_t count_discard_cleaned   = 0;
+    uint32_t count_discard_purgeable = 0;
+    uint32_t count_discard_speculative = 0;
+    uint32_t count_discard_vm_struct_pages = 0;
+    uint32_t i;
+    uint32_t             bank;
+    hibernate_bitmap_t * bitmap;
+    hibernate_bitmap_t * bitmap_wired;
+    boolean_t                   discard_all;
+    boolean_t            discard;
+
+    HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
+
+    if (preflight) {
+        page_list       = NULL;
+        page_list_wired = NULL;
+        page_list_pal   = NULL;
+               discard_all     = FALSE;
+    } else {
+               discard_all     = will_discard;
+    }
+
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+        vm_page_lock_queues();
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_LOCK(&lq->vpl_lock);
+           }
+       }
+    }
+#endif  /* MACH_ASSERT || DEBUG */
+
+
+    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
+
+    clock_get_uptime(&start);
+
+    if (!preflight) {
+       hibernate_page_list_zero(page_list);
+       hibernate_page_list_zero(page_list_wired);
+       hibernate_page_list_zero(page_list_pal);
+    
+       hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
+       hibernate_stats.cd_pages = pages;
+    }
+
+    if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++)
+                   vm_page_reactivate_local(i, TRUE, !preflight);
+    }
+
+    if (preflight) {
+       vm_object_lock(compressor_object);
+       vm_page_lock_queues();
+       lck_mtx_lock(&vm_page_queue_free_lock);
+    }
+
+    m = (vm_page_t) hibernate_gobble_queue;
+    while (m)
+    {
+       pages--;
+       count_wire--;
+       if (!preflight) {
+           hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+       }
+       m = (vm_page_t) m->pageq.next;
+    }
+
+    if (!preflight) for( i = 0; i < real_ncpus; i++ )
+    {
+       if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
+       {
+           for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = (vm_page_t)m->pageq.next)
+           {
+               pages--;
+               count_wire--;
+               hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+               hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+               hibernate_stats.cd_local_free++;
+               hibernate_stats.cd_total_free++;
+           }
+       }
+    }
+
+    for( i = 0; i < vm_colors; i++ )
+    {
+       queue_iterate(&vm_page_queue_free[i],
+                     m,
+                     vm_page_t,
+                     pageq)
+       {
+           pages--;
+           count_wire--;
+           if (!preflight) {
+               hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+               hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    
+               hibernate_stats.cd_total_free++;
+           }
+       }
+    }
+
+    queue_iterate(&vm_lopage_queue_free,
+                 m,
+                 vm_page_t,
+                 pageq)
+    {
+       pages--;
+       count_wire--;
+       if (!preflight) {
+           hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    
+           hibernate_stats.cd_total_free++;
+       }
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+    while (m && !queue_end(&vm_page_queue_throttled, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            count_discard_inactive++;
+            discard = discard_all;
+        }
+        else
+            count_throttled++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+        if (discard) hibernate_discard_page(m);
+       m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+    while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_inactive++;
+            discard = discard_all;
+        }
+        else
+            count_anonymous++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+    while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_cleaned++;
+            discard = discard_all;
+        }
+        else
+            count_cleaned++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_active);
+    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_active++;
+            discard = discard_all;
+        }
+        else
+            count_active++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+       discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_inactive++;
+            discard = discard_all;
+        }
+        else
+            count_inactive++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+       m = next;
+    }
+
+    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+    {
+       m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
+       while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+       {
+           next = (vm_page_t) m->pageq.next;
+           discard = FALSE;
+           if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+            && hibernate_consider_discard(m, preflight))
+           {
+               if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+               count_discard_speculative++;
+               discard = discard_all;
+           }
+           else
+               count_speculative++;
+           count_wire--;
+           if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+           if (discard)    hibernate_discard_page(m);
+           m = next;
+       }
+    }
+
+    queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
+    {
+        count_compressor++;
+       count_wire--;
+       if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    }
+
+    if (preflight == FALSE && discard_all == TRUE) {
+           KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+           HIBLOG("hibernate_teardown started\n");
+           count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
+           HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
+
+           pages -= count_discard_vm_struct_pages;
+           count_wire -= count_discard_vm_struct_pages;
+
+           hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
+
+           KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+    }
+
+    if (!preflight) {
+       // pull wired from hibernate_bitmap
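+       // a clear bit in page_list_wired identifies a wired page that must
+       // be saved; set those bits in page_list so wired pages are only
+       // described by the wired list and the two lists stay disjoint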
+       bitmap = &page_list->bank_bitmap[0];
+       bitmap_wired = &page_list_wired->bank_bitmap[0];
+       for (bank = 0; bank < page_list->bank_count; bank++)
+       {
+           for (i = 0; i < bitmap->bitmapwords; i++)
+               bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
+           bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
+           bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+       }
+    }
+
+    // machine dependent adjustments
+    hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
+
+    if (!preflight) {
+       hibernate_stats.cd_count_wire = count_wire;
+       hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
+               count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
+    }
+
+    clock_get_uptime(&end);
+    absolutetime_to_nanoseconds(end - start, &nsec);
+    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
+
+    HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d\n", 
+          pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
+               discard_all ? "did" : "could",
+               count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+
+    if (hibernate_stats.cd_skipped_xpmapped)
+           HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
+    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+    if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_UNLOCK(&lq->vpl_lock);
+           }
+       }
+        vm_page_unlock_queues();
+    }
+#endif  /* MACH_ASSERT || DEBUG */
+
+    if (preflight) {
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+       vm_page_unlock_queues();
+       vm_object_unlock(compressor_object);
+    }
+
+    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
+}
+
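+/*
+ * Walk the pageable queues and free every page whose bit was set in
+ * page_list, i.e. the pages hibernate_page_list_setall() decided could
+ * be discarded rather than preserved in the image.
+ */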
+void
+hibernate_page_list_discard(hibernate_page_list_t * page_list)
+{
+    uint64_t  start, end, nsec;
+    vm_page_t m;
+    vm_page_t next;
+    uint32_t  i;
+    uint32_t  count_discard_active    = 0;
+    uint32_t  count_discard_inactive  = 0;
+    uint32_t  count_discard_purgeable = 0;
+    uint32_t  count_discard_cleaned   = 0;
+    uint32_t  count_discard_speculative = 0;
+
+
+#if MACH_ASSERT || DEBUG
+        vm_page_lock_queues();
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_LOCK(&lq->vpl_lock);
+           }
+       }
+#endif  /* MACH_ASSERT || DEBUG */
+
+    clock_get_uptime(&start);
+
+    m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+    while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+        if (hibernate_page_bittst(page_list, m->phys_page))
+        {
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_inactive++;
+            hibernate_discard_page(m);
+        }
+        m = next;
+    }
+
+    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+    {
+       m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
+       while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+       {
+           next = (vm_page_t) m->pageq.next;
+           if (hibernate_page_bittst(page_list, m->phys_page))
+           {
+               count_discard_speculative++;
+               hibernate_discard_page(m);
+           }
+           m = next;
+       }
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+        if (hibernate_page_bittst(page_list, m->phys_page))
+        {
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_inactive++;
+            hibernate_discard_page(m);
+        }
+        m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_active);
+    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+        if (hibernate_page_bittst(page_list, m->phys_page))
+        {
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_active++;
+            hibernate_discard_page(m);
+        }
+        m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+    while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+        if (hibernate_page_bittst(page_list, m->phys_page))
+        {
+           if (m->dirty)
+               count_discard_purgeable++;
+           else
+               count_discard_cleaned++;
+            hibernate_discard_page(m);
+        }
+        m = next;
+    }
+
+#if MACH_ASSERT || DEBUG
+       if (vm_page_local_q) {
+           for (i = 0; i < vm_page_local_q_count; i++) {
+               struct vpl      *lq;
+               lq = &vm_page_local_q[i].vpl_un.vpl;
+               VPL_UNLOCK(&lq->vpl_lock);
+           }
+       }
+        vm_page_unlock_queues();
+#endif  /* MACH_ASSERT || DEBUG */
+
+    clock_get_uptime(&end);
+    absolutetime_to_nanoseconds(end - start, &nsec);
+    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
+                nsec / 1000000ULL,
+               count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+}
+
+boolean_t       hibernate_paddr_map_inited = FALSE;
+boolean_t       hibernate_rebuild_needed = FALSE;
+unsigned int   hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t      hibernate_rebuild_hash_list = NULL;
+
+unsigned int   hibernate_teardown_found_tabled_pages = 0;
+unsigned int   hibernate_teardown_found_created_pages = 0;
+unsigned int   hibernate_teardown_found_free_pages = 0;
+unsigned int   hibernate_teardown_vm_page_free_count;
+
+
+struct ppnum_mapping {
+       struct ppnum_mapping    *ppnm_next;
+       ppnum_t                 ppnm_base_paddr;
+       unsigned int            ppnm_sindx;
+       unsigned int            ppnm_eindx;
+};
+
+struct ppnum_mapping   *ppnm_head;
+struct ppnum_mapping   *ppnm_last_found = NULL;
+
+
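+/*
+ * Build the list of ppnum_mapping runs, one per contiguous range of
+ * physical page numbers backing the vm_pages array, so that an index
+ * into vm_pages can later be converted back into a physical page number
+ * by hibernate_lookup_paddr().
+ */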
+void
+hibernate_create_paddr_map() 
+{
+       unsigned int    i;
+       ppnum_t         next_ppnum_in_run = 0;
+       struct ppnum_mapping *ppnm = NULL;
+
+       if (hibernate_paddr_map_inited == FALSE) {
+
+               for (i = 0; i < vm_pages_count; i++) {
+
+                       if (ppnm)
+                               ppnm->ppnm_eindx = i;
+
+                       if (ppnm == NULL || vm_pages[i].phys_page != next_ppnum_in_run) {
+
+                               ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+                               ppnm->ppnm_next = ppnm_head;
+                               ppnm_head = ppnm;
+
+                               ppnm->ppnm_sindx = i;
+                               ppnm->ppnm_base_paddr = vm_pages[i].phys_page;
+                       }
+                       next_ppnum_in_run = vm_pages[i].phys_page + 1;
+               }
+               ppnm->ppnm_eindx++;
+
+               hibernate_paddr_map_inited = TRUE;
+       }
+}
+
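+/*
+ * Convert a vm_pages index back into its physical page number using the
+ * ppnum_mapping runs; the most recently matched run is cached in
+ * ppnm_last_found.
+ */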
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+       struct ppnum_mapping *ppnm = NULL;
+       
+       ppnm = ppnm_last_found;
+
+       if (ppnm) {
+               if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+                       goto done;
+       }
+       for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+               if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+                       ppnm_last_found = ppnm;
+                       break;
+               }
+       }
+       if (ppnm == NULL)
+               panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+       return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
+
+
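+/*
+ * Mark every page fully contained in the kernel virtual range
+ * [saddr, eaddr) as not needing to be saved, in both bitmaps, and
+ * return the number of pages marked.  Partial pages at either end of
+ * the range are left alone.
+ */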
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+       addr64_t        saddr_aligned;
+       addr64_t        eaddr_aligned;
+       addr64_t        addr;
+       ppnum_t         paddr;
+       unsigned int    mark_as_unneeded_pages = 0;
+
+       saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+       eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+       for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+               paddr = pmap_find_phys(kernel_pmap, addr);
+
+               assert(paddr);
+
+               hibernate_page_bitset(page_list,       TRUE, paddr);
+               hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+               mark_as_unneeded_pages++;
+       }
+       return (mark_as_unneeded_pages);
+}
+
+
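+/*
+ * Re-insert a vm_page_t at the head of its vm_page hash bucket; used
+ * while rebuilding the hash table after wake.
+ */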
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+       vm_page_bucket_t *bucket;
+       int             hash_id;
+
+       assert(mem->hashed);
+       assert(mem->object);
+       assert(mem->offset != (vm_object_offset_t) -1);
+
+       /*
+        *      Insert it into the object/offset hash table
+        */
+       hash_id = vm_page_hash(mem->object, mem->offset);
+       bucket = &vm_page_buckets[hash_id];
+
+       mem->next_m = bucket->page_list;
+       bucket->page_list = VM_PAGE_PACK_PTR(mem);
+}
+
+
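+/*
+ * Reinitialize vm_pages[sindx..eindx) as free pages and put them back on
+ * the per-color free queues; used by hibernate_rebuild_vm_structs() to
+ * recreate the free pages that were compacted away during teardown.
+ */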
+void
+hibernate_free_range(int sindx, int eindx)
+{
+       vm_page_t       mem;
+       unsigned int    color;
+
+       while (sindx < eindx) {
+               mem = &vm_pages[sindx];
+
+               vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+               mem->lopage = FALSE;
+               mem->free = TRUE;
+
+               color = mem->phys_page & vm_color_mask;
+               queue_enter_first(&vm_page_queue_free[color],
+                                 mem,
+                                 vm_page_t,
+                                 pageq);
+               vm_page_free_count++;
+
+               sindx++;
+       }
+}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
 
-               /* prepend to the very front of sorted list? */
-               else if (m->phys_page < sort_list->phys_page)
-               {
-                       vm_page_queue_free = NEXT_PAGE(m);
-                       cpm_counter(++vpfls_general_insertions);
-                       prevcontaddr = PPNUM_PREV(m->phys_page);
-                       nextcontaddr = PPNUM_NEXT(m->phys_page);
-                       SET_NEXT_PAGE(m, sort_list);
-                       contfirstprev = &sort_list;
-                       contlast = sort_list = m;
-                       npages = 1;
-               }
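+/*
+ * Undo hibernate_teardown_vm_structs() after wake: walk the compacted
+ * vm_pages entries from the last valid index back to 0, move each
+ * relocated vm_page_t back to the slot recorded in its next_m field,
+ * re-hash it if needed, and reinitialize the holes in between as free
+ * pages.  Fictitious pages stashed on hibernate_rebuild_hash_list are
+ * re-hashed at the end.
+ */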
+void
+hibernate_rebuild_vm_structs(void)
+{
+       int             cindx, sindx, eindx;
+       vm_page_t       mem, tmem, mem_next;
+       AbsoluteTime    startTime, endTime;
+       uint64_t        nsec;
 
-               else /* get to proper place for insertion */
-               {
-                       if (m->phys_page < nextcontaddr)
-                       {
-                               prevcontaddr = PPNUM_PREV(sort_list->phys_page);
-                               nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
-                               contfirstprev = &sort_list;
-                               contlast = sort_list;
-                               npages = 1;
-                       }
-                       for (m1 = NEXT_PAGE(contlast);
-                            npages < contig_pages &&
-                            m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
-                            m1 = NEXT_PAGE(m1))
-                       {
-                               if (m1->phys_page != nextcontaddr) {
-                                       prevcontaddr = PPNUM_PREV(m1->phys_page);
-                                       contfirstprev = NEXT_PAGE_PTR(contlast);
-                                       npages = 1;
-                               } else {
-                                       npages++;
-                               }
-                               nextcontaddr = PPNUM_NEXT(m1->phys_page);
-                               contlast = m1;
-                       }
+       if (hibernate_rebuild_needed == FALSE)
+               return;
 
-                       /*
-                        * We may actually already have enough.
-                        * This could happen if a previous prepend
-                        * joined up two runs to meet our needs.
-                        * If so, bail before we take the current
-                        * page off the free queue.
-                        */
-                       if (npages == contig_pages)
-                               break;
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+       HIBLOG("hibernate_rebuild started\n");
 
-                       if (m->phys_page != nextcontaddr) 
-                       {
-                               contfirstprev = NEXT_PAGE_PTR(contlast);
-                               prevcontaddr = PPNUM_PREV(m->phys_page);
-                               nextcontaddr = PPNUM_NEXT(m->phys_page);
-                               npages = 1;
-                       } else {
-                               nextcontaddr = PPNUM_NEXT(nextcontaddr);
-                               npages++;
-                       }
-                       vm_page_queue_free = NEXT_PAGE(m);
-                       cpm_counter(++vpfls_general_insertions);
-                       SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
-                       SET_NEXT_PAGE(contlast, m);
-                       contlast = m;
-               }
+       clock_get_uptime(&startTime);
+
+       hibernate_rebuild_pmap_structs();
+
+       bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
+       eindx = vm_pages_count;
+
+       for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
                
-               /* See how many pages are now contiguous after the insertion */
-               for (m1 = NEXT_PAGE(m);
-                    npages < contig_pages &&
-                    m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
-                    m1 = NEXT_PAGE(m1))
-               {
-                       nextcontaddr = PPNUM_NEXT(nextcontaddr);
-                       contlast = m1;
-                       npages++;
+               mem = &vm_pages[cindx];
+               /*
+                * hibernate_teardown_vm_structs leaves the location where
+                * this vm_page_t must be located in "next_m".
+                */
+               tmem = VM_PAGE_UNPACK_PTR(mem->next_m);
+               mem->next_m = VM_PAGE_PACK_PTR(NULL);
+
+               sindx = (int)(tmem - &vm_pages[0]);
+
+               if (mem != tmem) {
+                       /*
+                        * this vm_page_t was moved by hibernate_teardown_vm_structs,
+                        * so move it back to its real location
+                        */
+                       *tmem = *mem;
+                       mem = tmem;
                }
-       }
+               if (mem->hashed)
+                       hibernate_hash_insert_page(mem);
+               /*
+                * the 'hole' between this vm_page_t and the previous
+                * vm_page_t we moved needs to be initialized as 
+                * a range of free vm_page_t's
+                */
+               hibernate_free_range(sindx + 1, eindx);
 
-       /* how did we do? */
-       if (npages == contig_pages)
-       {
-               cpm_counter(++vpfc_satisfied);
+               eindx = sindx;
+       }
+       if (sindx)
+               hibernate_free_range(0, sindx);
 
-               /* remove the contiguous range from the sorted list */
-               m = *contfirstprev;
-               *contfirstprev = NEXT_PAGE(contlast);
-               SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
-               assert(vm_page_verify_contiguous(m, npages));
+       assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
 
-               /* inline vm_page_gobble() for each returned page */
-               for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
-                       assert(m1->free);
-                       assert(!m1->wanted);
-                       assert(!m1->laundry);
-                       m1->free = FALSE;
-                       m1->no_isync = TRUE;
-                       m1->gobbled = TRUE;
-               }
-               vm_page_wire_count += npages;
-               vm_page_gobble_count += npages;
-               vm_page_free_count -= npages;
+       /*
+        * process the list of vm_page_t's that were entered in the hash,
+        * but were not located in the vm_pages array... these are
+        * vm_page_t's that were created on the fly (i.e. fictitious)
+        */
+       for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
+               mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
 
-               /* stick free list at the tail of the sorted list  */
-               while ((m1 = *contfirstprev) != VM_PAGE_NULL)
-                       contfirstprev = (vm_page_t *)&m1->pageq.next;
-               *contfirstprev = vm_page_queue_free;
+               mem->next_m = VM_PAGE_PACK_PTR(NULL);
+               hibernate_hash_insert_page(mem);
        }
+       hibernate_rebuild_hash_list = NULL;
 
-       vm_page_queue_free = sort_list;
-       return m;
+        clock_get_uptime(&endTime);
+        SUB_ABSOLUTETIME(&endTime, &startTime);
+        absolutetime_to_nanoseconds(endTime, &nsec);
+
+       HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
+
+       hibernate_rebuild_needed = FALSE;
+
+       KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
 }
 
-/*
- *     Allocate a list of contiguous, wired pages.
- */
-kern_return_t
-cpm_allocate(
-       vm_size_t       size,
-       vm_page_t       *list,
-       boolean_t       wire)
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
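+/*
+ * Reduce the amount of VM metadata that has to be written to the image:
+ * compact the vm_pages array by sliding in-use vm_page_t structures down
+ * over free slots (each page's original slot is recorded in next_m so
+ * hibernate_rebuild_vm_structs() can undo the move), stash hashed pages
+ * that live outside the vm_pages array on hibernate_rebuild_hash_list,
+ * and mark the hash buckets, the unused tail of vm_pages and any unneeded
+ * pmap structures as pages that do not need to be saved.  Returns the
+ * number of pages marked unneeded.
+ */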
+uint32_t
+hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
 {
-       register vm_page_t      m;
-       vm_page_t               pages;
-       unsigned int            npages;
-       unsigned int            vm_pages_available;
-       boolean_t               wakeup;
+       unsigned int    i;
+       unsigned int    compact_target_indx;
+       vm_page_t       mem, mem_next;
+       vm_page_bucket_t *bucket;
+       unsigned int    mark_as_unneeded_pages = 0;
+       unsigned int    unneeded_vm_page_bucket_pages = 0;
+       unsigned int    unneeded_vm_pages_pages = 0;
+       unsigned int    unneeded_pmap_pages = 0;
+       addr64_t        start_of_unneeded = 0;
+       addr64_t        end_of_unneeded = 0;
 
-       if (size % page_size != 0)
-               return KERN_INVALID_ARGUMENT;
+       
+       if (hibernate_should_abort())
+               return (0);
 
-       vm_page_lock_queues();
-       mutex_lock(&vm_page_queue_free_lock);
+       HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
+              vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
+              vm_page_cleaned_count, compressor_object->resident_page_count);
 
-       /*
-        *      Should also take active and inactive pages
-        *      into account...  One day...
-        */
-       npages = size / page_size;
-       vm_pages_available = vm_page_free_count - vm_page_free_reserved;
+       for (i = 0; i < vm_page_bucket_count; i++) {
 
-       if (npages > vm_pages_available) {
-               mutex_unlock(&vm_page_queue_free_lock);
-               vm_page_unlock_queues();
-               return KERN_RESOURCE_SHORTAGE;
+               bucket = &vm_page_buckets[i];
+
+               for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = mem_next) {
+                       assert(mem->hashed);
+
+                       mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
+
+                       if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
+                               mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
+                               hibernate_rebuild_hash_list = mem;
+                       }
+               }
        }
+       unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
+       mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
 
-       /*
-        *      Obtain a pointer to a subset of the free
-        *      list large enough to satisfy the request;
-        *      the region will be physically contiguous.
-        */
-       pages = vm_page_find_contiguous(npages);
+       hibernate_teardown_vm_page_free_count = vm_page_free_count;
 
-       /* adjust global freelist counts and determine need for wakeups */
-       if (vm_page_free_count < vm_page_free_count_minimum)
-               vm_page_free_count_minimum = vm_page_free_count;
+       compact_target_indx = 0;
 
-       wakeup = ((vm_page_free_count < vm_page_free_min) ||
-                 ((vm_page_free_count < vm_page_free_target) &&
-                  (vm_page_inactive_count < vm_page_inactive_target)));
-               
-       mutex_unlock(&vm_page_queue_free_lock);
+       for (i = 0; i < vm_pages_count; i++) {
 
-       if (pages == VM_PAGE_NULL) {
-               vm_page_unlock_queues();
-               return KERN_NO_SPACE;
-       }
+               mem = &vm_pages[i];
 
-       /*
-        *      Walk the returned list, wiring the pages.
-        */
-       if (wire == TRUE)
-               for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
+               if (mem->free) {
+                       unsigned int color;
+
+                       assert(mem->busy);
+                       assert(!mem->lopage);
+
+                       color = mem->phys_page & vm_color_mask;
+
+                       queue_remove(&vm_page_queue_free[color],
+                                    mem,
+                                    vm_page_t,
+                                    pageq);
+                       mem->pageq.next = NULL;
+                       mem->pageq.prev = NULL;
+
+                       vm_page_free_count--;
+
+                       hibernate_teardown_found_free_pages++;
+
+                       if ( !vm_pages[compact_target_indx].free)
+                               compact_target_indx = i;
+               } else {
                        /*
-                        *      Essentially inlined vm_page_wire.
+                        * record this vm_page_t's original location;
+                        * we need this even if the page doesn't get moved,
+                        * so the rebuild function knows it doesn't have
+                        * to be moved back
                         */
-                       assert(!m->active);
-                       assert(!m->inactive);
-                       assert(!m->private);
-                       assert(!m->fictitious);
-                       assert(m->wire_count == 0);
-                       assert(m->gobbled);
-                       m->gobbled = FALSE;
-                       m->wire_count++;
-                       --vm_page_gobble_count;
+                       mem->next_m = VM_PAGE_PACK_PTR(mem);
+
+                       if (vm_pages[compact_target_indx].free) {
+                               /*
+                                * we've got a hole to fill, so
+                                * move this vm_page_t to its new home
+                                */
+                               vm_pages[compact_target_indx] = *mem;
+                               mem->free = TRUE;
+
+                               hibernate_teardown_last_valid_compact_indx = compact_target_indx;
+                               compact_target_indx++;
+                       } else
+                               hibernate_teardown_last_valid_compact_indx = i;
                }
-       vm_page_unlock_queues();
+       }
+       unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
+                                                            (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
+       mark_as_unneeded_pages += unneeded_vm_pages_pages;
 
-       if (wakeup)
-               thread_wakeup((event_t) &vm_page_free_wanted);
+       hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
 
-       /*
-        *      The CPM pages should now be available and
-        *      ordered by ascending physical address.
-        */
-       assert(vm_page_verify_contiguous(pages, npages));
+       if (start_of_unneeded) {
+               unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
+               mark_as_unneeded_pages += unneeded_pmap_pages;
+       }
+       HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
 
-       *list = pages;
-       return KERN_SUCCESS;
+       hibernate_rebuild_needed = TRUE;
+
+       return (mark_as_unneeded_pages);
 }
 
 
+#endif /* HIBERNATION */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
 #include <mach_vm_debug.h>
 #if    MACH_VM_DEBUG
 
@@ -2486,6 +6681,7 @@ vm_page_info(
        unsigned int count)
 {
        unsigned int i;
+       lck_spin_t      *bucket_lock;
 
        if (vm_page_bucket_count < count)
                count = vm_page_bucket_count;
@@ -2495,10 +6691,13 @@ vm_page_info(
                unsigned int bucket_count = 0;
                vm_page_t m;
 
-               simple_lock(&vm_page_bucket_lock);
-               for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+               bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+               lck_spin_lock(bucket_lock);
+
+               for (m = VM_PAGE_UNPACK_PTR(bucket->page_list); m != VM_PAGE_NULL; m = VM_PAGE_UNPACK_PTR(m->next_m))
                        bucket_count++;
-               simple_unlock(&vm_page_bucket_lock);
+
+               lck_spin_unlock(bucket_lock);
 
                /* don't touch pageable memory while holding locks */
                info[i].hib_count = bucket_count;
@@ -2508,65 +6707,653 @@ vm_page_info(
 }
 #endif /* MACH_VM_DEBUG */
 
-#include <mach_kdb.h>
-#if    MACH_KDB
+#if VM_PAGE_BUCKETS_CHECK
+void
+vm_page_buckets_check(void)
+{
+       unsigned int i;
+       vm_page_t p;
+       unsigned int p_hash;
+       vm_page_bucket_t *bucket;
+       lck_spin_t      *bucket_lock;
+
+       if (!vm_page_buckets_check_ready) {
+               return;
+       }
+
+#if HIBERNATION
+       if (hibernate_rebuild_needed ||
+           hibernate_rebuild_hash_list) {
+               panic("BUCKET_CHECK: hibernation in progress: "
+                     "rebuild_needed=%d rebuild_hash_list=%p\n",
+                     hibernate_rebuild_needed,
+                     hibernate_rebuild_hash_list);
+       }
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+       char *cp;
+       for (cp = (char *) vm_page_fake_buckets_start;
+            cp < (char *) vm_page_fake_buckets_end;
+            cp++) {
+               if (*cp != 0x5a) {
+                       panic("BUCKET_CHECK: corruption at %p in fake buckets "
+                             "[0x%llx:0x%llx]\n",
+                             cp,
+                             (uint64_t) vm_page_fake_buckets_start,
+                             (uint64_t) vm_page_fake_buckets_end);
+               }
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+       for (i = 0; i < vm_page_bucket_count; i++) {
+               bucket = &vm_page_buckets[i];
+               if (!bucket->page_list) {
+                       continue;
+               }
+
+               bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+               lck_spin_lock(bucket_lock);
+               p = VM_PAGE_UNPACK_PTR(bucket->page_list);
+               while (p != VM_PAGE_NULL) {
+                       if (!p->hashed) {
+                               panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+                                     "hash %d in bucket %d at %p "
+                                     "is not hashed\n",
+                                     p, p->object, p->offset,
+                                     p_hash, i, bucket);
+                       }
+                       p_hash = vm_page_hash(p->object, p->offset);
+                       if (p_hash != i) {
+                               panic("BUCKET_CHECK: corruption in bucket %d "
+                                     "at %p: page %p object %p offset 0x%llx "
+                                     "hash %d\n",
+                                     i, bucket, p, p->object, p->offset,
+                                     p_hash);
+                       }
+                       p = VM_PAGE_UNPACK_PTR(p->next_m);
+               }
+               lck_spin_unlock(bucket_lock);
+       }
 
-#include <ddb/db_output.h>
-#include <vm/vm_print.h>
-#define        printf  kdbprintf
+//     printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
 /*
- *     Routine:        vm_page_print [exported]
+ * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
+ * local queues if they exist... it's the only spot in the system where we add pages
+ * to those queues...  once on those queues, those pages can only move to one of the
+ * global page queues or the free queues... they NEVER move from local q to local q.
+ * the 'local' state is stable when vm_page_queues_remove is called since we're behind
+ * the global vm_page_queue_lock at this point...  we still need to take the local lock
+ * in case this operation is being run on a different CPU than the local queue's identity,
+ * but we don't have to worry about the page moving to a global queue or becoming wired
+ * while we're grabbing the local lock since those operations would require the global
+ * vm_page_queue_lock to be held, and we already own it.
+ *
+ * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
+ * 'wired' and local are ALWAYS mutually exclusive conditions.
  */
 void
-vm_page_print(
-       db_addr_t       db_addr)
+vm_page_queues_remove(vm_page_t mem)
 {
-       vm_page_t       p;
+       boolean_t       was_pageable;
+
+       VM_PAGE_QUEUES_ASSERT(mem, 1);
+       assert(!mem->pageout_queue);
+       /*
+        *      if (mem->pageout_queue)
+        *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
+        *              the caller is responsible for determining if the page is on that queue, and if so, must
+        *              either first remove it (it needs both the page queues lock and the object lock to do
+        *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
+        */
+       if (mem->local) {
+               struct vpl      *lq;
+               assert(mem->object != kernel_object);
+               assert(mem->object != compressor_object);
+               assert(!mem->inactive && !mem->speculative);
+               assert(!mem->active && !mem->throttled);
+               assert(!mem->clean_queue);
+               assert(!mem->fictitious);
+               lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;
+               VPL_LOCK(&lq->vpl_lock);
+               queue_remove(&lq->vpl_queue,
+                            mem, vm_page_t, pageq);
+               mem->local = FALSE;
+               mem->local_id = 0;
+               lq->vpl_count--;
+               if (mem->object->internal) {
+                       lq->vpl_internal_count--;
+               } else {
+                       lq->vpl_external_count--;
+               }
+               VPL_UNLOCK(&lq->vpl_lock);
+               was_pageable = FALSE;
+       }
 
-       p = (vm_page_t) (long) db_addr;
+       else if (mem->active) {
+               assert(mem->object != kernel_object);
+               assert(mem->object != compressor_object);
+               assert(!mem->inactive && !mem->speculative);
+               assert(!mem->clean_queue);
+               assert(!mem->throttled);
+               assert(!mem->fictitious);
+               queue_remove(&vm_page_queue_active,
+                       mem, vm_page_t, pageq);
+               mem->active = FALSE;
+               vm_page_active_count--;
+               was_pageable = TRUE;
+       }
 
-       iprintf("page 0x%x\n", p);
+       else if (mem->inactive) {
+               assert(mem->object != kernel_object);
+               assert(mem->object != compressor_object);
+               assert(!mem->active && !mem->speculative);
+               assert(!mem->throttled);
+               assert(!mem->fictitious);
+               vm_page_inactive_count--;
+               if (mem->clean_queue) {
+                       queue_remove(&vm_page_queue_cleaned,
+                        mem, vm_page_t, pageq);
+                       mem->clean_queue = FALSE;
+                       vm_page_cleaned_count--;
+               } else {
+                       if (mem->object->internal) {
+                               queue_remove(&vm_page_queue_anonymous,
+                               mem, vm_page_t, pageq);
+                               vm_page_anonymous_count--;
+                       } else {
+                               queue_remove(&vm_page_queue_inactive,
+                               mem, vm_page_t, pageq);
+                       }
+                       vm_purgeable_q_advance_all();
+               }
+               mem->inactive = FALSE;
+               was_pageable = TRUE;
+       }
 
-       db_indent += 2;
+       else if (mem->throttled) {
+               assert(mem->object != compressor_object);
+               assert(!mem->active && !mem->inactive);
+               assert(!mem->speculative);
+               assert(!mem->fictitious);
+               queue_remove(&vm_page_queue_throttled,
+                            mem, vm_page_t, pageq);
+               mem->throttled = FALSE;
+               vm_page_throttled_count--;
+               was_pageable = FALSE;
+       }
 
-       iprintf("object=0x%x", p->object);
-       printf(", offset=0x%x", p->offset);
-       printf(", wire_count=%d", p->wire_count);
+       else if (mem->speculative) {
+               assert(mem->object != compressor_object);
+               assert(!mem->active && !mem->inactive);
+               assert(!mem->throttled);
+               assert(!mem->fictitious);
+                remque(&mem->pageq);
+               mem->speculative = FALSE;
+               vm_page_speculative_count--;
+               was_pageable = TRUE;
+       }
 
-       iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
-               (p->inactive ? "" : "!"),
-               (p->active ? "" : "!"),
-               (p->gobbled ? "" : "!"),
-               (p->laundry ? "" : "!"),
-               (p->free ? "" : "!"),
-               (p->reference ? "" : "!"),
-               (p->encrypted ? "" : "!"));
-       iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
-               (p->busy ? "" : "!"),
-               (p->wanted ? "" : "!"),
-               (p->tabled ? "" : "!"),
-               (p->fictitious ? "" : "!"),
-               (p->private ? "" : "!"),
-               (p->precious ? "" : "!"));
-       iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
-               (p->absent ? "" : "!"),
-               (p->error ? "" : "!"),
-               (p->dirty ? "" : "!"),
-               (p->cleaning ? "" : "!"),
-               (p->pageout ? "" : "!"),
-               (p->clustered ? "" : "!"));
-       iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
-               (p->lock_supplied ? "" : "!"),
-               (p->overwriting ? "" : "!"),
-               (p->restart ? "" : "!"),
-               (p->unusual ? "" : "!"));
-
-       iprintf("phys_page=0x%x", p->phys_page);
-       printf(", page_error=0x%x", p->page_error);
-       printf(", page_lock=0x%x", p->page_lock);
-       printf(", unlock_request=%d\n", p->unlock_request);
-
-       db_indent -= 2;
-}
-#endif /* MACH_KDB */
+       else if (mem->pageq.next || mem->pageq.prev) {
+               was_pageable = FALSE;
+               panic("vm_page_queues_remove: unmarked page on Q");
+       } else {
+               was_pageable = FALSE;
+       }
+
+       mem->pageq.next = NULL;
+       mem->pageq.prev = NULL;
+       VM_PAGE_QUEUES_ASSERT(mem, 0);
+       if (was_pageable) {
+               if (mem->object->internal) {
+                       vm_page_pageable_internal_count--;
+               } else {
+                       vm_page_pageable_external_count--;
+               }
+       }
+}
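+
+/*
+ * Note on the was_pageable accounting above: only the active, inactive
+ * (including cleaned/anonymous) and speculative cases count against
+ * vm_page_pageable_internal_count / vm_page_pageable_external_count, so
+ * removing a page from a local or throttled queue leaves those counters
+ * untouched.
+ */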
+
+void
+vm_page_remove_internal(vm_page_t page)
+{
+       vm_object_t __object = page->object;
+       if (page == __object->memq_hint) {
+               vm_page_t       __new_hint;
+               queue_entry_t   __qe;
+               __qe = queue_next(&page->listq);
+               if (queue_end(&__object->memq, __qe)) {
+                       __qe = queue_prev(&page->listq);
+                       if (queue_end(&__object->memq, __qe)) {
+                               __qe = NULL;
+                       }
+               }
+               __new_hint = (vm_page_t) __qe;
+               __object->memq_hint = __new_hint;
+       }
+       queue_remove(&__object->memq, page, vm_page_t, listq);
+}
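+
+/*
+ * Sketch of the hint maintenance above: when the page being unlinked is the
+ * object's memq_hint, the hint is first moved to a neighbouring page so
+ * subsequent lookups in the object can still start near the hot spot, e.g.
+ *
+ *     memq:  ... <-> prev <-> page <-> next <-> ...
+ *     hint == page  =>  hint = next, else prev, else NULL
+ */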
+
+void
+vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
+{
+       VM_PAGE_QUEUES_ASSERT(mem, 0);
+       assert(!mem->fictitious);
+       assert(!mem->laundry);
+       assert(!mem->pageout_queue);
+       vm_page_check_pageable_safe(mem);
+       if (mem->object->internal) {
+               if (first == TRUE)
+                       queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+               else
+                       queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+               vm_page_anonymous_count++;
+               vm_page_pageable_internal_count++;
+       } else {
+               if (first == TRUE)
+                       queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+               else
+                       queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+               vm_page_pageable_external_count++;
+       }
+       mem->inactive = TRUE;
+       vm_page_inactive_count++;
+       token_new_pagecount++;
+}
+
+/*
+ * Pages from special kernel objects shouldn't
+ * be placed on pageable queues.
+ */
+void
+vm_page_check_pageable_safe(vm_page_t page)
+{
+       if (page->object == kernel_object) {
+               panic("vm_page_check_pageable_safe: trying to add page " \
+                        "from kernel object (%p) to pageable queue", kernel_object);
+       }
+
+       if (page->object == compressor_object) {
+               panic("vm_page_check_pageable_safe: trying to add page " \
+                        "from compressor object (%p) to pageable queue", compressor_object);
+       }
+
+       if (page->object == vm_submap_object) {
+               panic("vm_page_check_pageable_safe: trying to add page " \
+                       "from submap object (%p) to pageable queue", vm_submap_object);
+       }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * wired page diagnose
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <libkern/OSKextLibPrivate.h>
+
+vm_allocation_site_t * 
+vm_allocation_sites[VM_KERN_MEMORY_COUNT];
+
+vm_tag_t 
+vm_tag_bt(void)
+{
+    uintptr_t* frameptr;
+    uintptr_t* frameptr_next;
+    uintptr_t retaddr;
+    uintptr_t kstackb, kstackt;
+    const vm_allocation_site_t * site;
+    thread_t cthread;
+    
+    cthread = current_thread();
+    if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;
+
+    kstackb = cthread->kernel_stack;
+    kstackt = kstackb + kernel_stack_size;
+
+    /* Load stack frame pointer (EBP on x86) into frameptr */
+    frameptr = __builtin_frame_address(0);
+    site = NULL;
+    while (frameptr != NULL) 
+    {
+       /* Verify thread stack bounds */
+       if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;
+
+       /* Next frame pointer is pointed to by the previous one */
+       frameptr_next = (uintptr_t*) *frameptr;
+
+       /* Pull return address from one spot above the frame pointer */
+       retaddr = *(frameptr + 1);
+
+       if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
+       {
+           site = OSKextGetAllocationSiteForCaller(retaddr);
+           break;
+       }
+
+       frameptr = frameptr_next;
+    }
+    return (site ? site->tag : VM_KERN_MEMORY_NONE);
+}
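+
+/*
+ * Frame layout assumed by the walk above (x86-style frame pointers):
+ *
+ *     frameptr[0]  saved frame pointer of the caller
+ *     frameptr[1]  return address into the caller
+ *
+ * The loop stops at the first return address that lies outside
+ * [vm_kernel_stext, vm_kernel_top], i.e. in a kext, and asks
+ * OSKextGetAllocationSiteForCaller() for the tag registered for that caller.
+ */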
+
+static uint64_t free_tag_bits[256/64];
+
+void
+vm_tag_alloc_locked(vm_allocation_site_t * site)
+{
+    vm_tag_t tag;
+    uint64_t avail;
+    uint64_t idx;
+
+    if (site->tag) return;
+
+    idx = 0;
+    while (TRUE)
+    {
+       avail = free_tag_bits[idx];
+       if (avail)
+       {
+           tag = __builtin_clzll(avail);
+           avail &= ~(1ULL << (63 - tag));
+           free_tag_bits[idx] = avail;
+           tag += (idx << 6);
+           break;
+       }
+       idx++;
+       if (idx >= (sizeof(free_tag_bits) / sizeof(free_tag_bits[0])))
+       {
+            tag = VM_KERN_MEMORY_ANY;
+            break;
+       }
+    }
+    site->tag = tag;
+    if (VM_KERN_MEMORY_ANY != tag)
+    {
+       assert(!vm_allocation_sites[tag]);
+       vm_allocation_sites[tag] = site;
+    }
+}
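+
+/*
+ * Worked example of the free-tag bitmap used above and in
+ * vm_tag_free_locked() below: tags are packed 64 per word, MSB first, so
+ *
+ *     word = tag >> 6                      e.g. tag 70 -> free_tag_bits[1]
+ *     bit  = 1ULL << (63 - (tag & 63))     e.g. tag 70 -> bit 57
+ *
+ * and __builtin_clzll(avail) yields the lowest-numbered free tag in a word.
+ */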
+
+static void
+vm_tag_free_locked(vm_tag_t tag)
+{
+    uint64_t avail;
+    uint32_t idx;
+    uint64_t bit;
+
+    if (VM_KERN_MEMORY_ANY == tag) return;
+
+    idx = (tag >> 6);
+    avail = free_tag_bits[idx];
+    tag &= 63;
+    bit = (1ULL << (63 - tag));
+    assert(!(avail & bit));
+    free_tag_bits[idx] = (avail | bit);
+}
+
+static void
+vm_tag_init(void)
+{
+    vm_tag_t tag;
+    for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
+    {
+        vm_tag_free_locked(tag);
+    }
+}
+
+vm_tag_t
+vm_tag_alloc(vm_allocation_site_t * site)
+{
+    vm_tag_t tag;
+
+    if (VM_TAG_BT & site->flags)
+    {
+       tag = vm_tag_bt();
+       if (VM_KERN_MEMORY_NONE != tag) return (tag);
+    }
+
+    if (!site->tag) 
+    {
+       lck_spin_lock(&vm_allocation_sites_lock);
+       vm_tag_alloc_locked(site);
+       lck_spin_unlock(&vm_allocation_sites_lock);
+    }
+
+    return (site->tag);
+}
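+
+/*
+ * Call pattern above: with VM_TAG_BT set in site->flags the tag is taken from
+ * the backtrace whenever it resolves to a kext, so site->tag may never be
+ * assigned; otherwise the site is lazily given a dynamic tag under
+ * vm_allocation_sites_lock on first use.
+ */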
+
+static void 
+vm_page_count_object(mach_memory_info_t * sites, unsigned int __unused num_sites, vm_object_t object)
+{
+    if (!object->wired_page_count) return;
+    if (object != kernel_object)
+    {
+       assert(object->wire_tag < num_sites);
+       sites[object->wire_tag].size += ptoa_64(object->wired_page_count);
+    }
+}
+
+typedef void (*vm_page_iterate_proc)(mach_memory_info_t * sites, 
+                                    unsigned int num_sites, vm_object_t object);
+
+static void 
+vm_page_iterate_purgeable_objects(mach_memory_info_t * sites, unsigned int num_sites,
+                                 vm_page_iterate_proc proc, purgeable_q_t queue, 
+                                 int group)
+{
+    vm_object_t object;
+
+    for (object = (vm_object_t) queue_first(&queue->objq[group]);
+       !queue_end(&queue->objq[group], (queue_entry_t) object);
+       object = (vm_object_t) queue_next(&object->objq))
+    {
+       proc(sites, num_sites, object);
+    }
+}
+
+static void 
+vm_page_iterate_objects(mach_memory_info_t * sites, unsigned int num_sites,
+                       vm_page_iterate_proc proc)
+{
+    purgeable_q_t   volatile_q;
+    queue_head_t  * nonvolatile_q;
+    vm_object_t     object;
+    int             group;
+
+    lck_spin_lock(&vm_objects_wired_lock);
+    queue_iterate(&vm_objects_wired,
+                 object,
+                 vm_object_t,
+                 objq)
+    {
+       proc(sites, num_sites, object);
+    }
+    lck_spin_unlock(&vm_objects_wired_lock);
+
+    lck_mtx_lock(&vm_purgeable_queue_lock);
+    nonvolatile_q = &purgeable_nonvolatile_queue;
+    for (object = (vm_object_t) queue_first(nonvolatile_q);
+        !queue_end(nonvolatile_q, (queue_entry_t) object);
+        object = (vm_object_t) queue_next(&object->objq))
+    {
+       proc(sites, num_sites, object);
+    }
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+    vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, 0);
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+    {
+       vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
+    }
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+    {
+       vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
+    }
+    lck_mtx_unlock(&vm_purgeable_queue_lock);
+}
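+
+/*
+ * Traversal order above: the callback runs over every wired object
+ * (vm_objects_wired), then over every purgeable object - nonvolatile,
+ * obsolete, and each FIFO and LIFO volatile group - while holding
+ * vm_purgeable_queue_lock.
+ */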
+
+static uint64_t
+process_account(mach_memory_info_t * sites, unsigned int __unused num_sites)
+{
+    uint64_t found;
+    unsigned int idx;
+    vm_allocation_site_t * site;
+
+    assert(num_sites >= VM_KERN_MEMORY_COUNT);
+    found = 0;
+    for (idx = 0; idx < VM_KERN_MEMORY_COUNT; idx++) 
+    {
+       found += sites[idx].size;
+       if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
+       {
+           sites[idx].site   = idx;
+           sites[idx].flags |= VM_KERN_SITE_TAG;
+           if (VM_KERN_MEMORY_ZONE == idx) sites[idx].flags |= VM_KERN_SITE_HIDE;
+           else                            sites[idx].flags |= VM_KERN_SITE_WIRED;
+           continue;
+       }
+       lck_spin_lock(&vm_allocation_sites_lock);
+       if ((site = vm_allocation_sites[idx]))
+       {
+           if (sites[idx].size)
+           {
+               sites[idx].flags |= VM_KERN_SITE_WIRED;
+               if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags))
+               {
+                   sites[idx].site   = OSKextGetKmodIDForSite(site);
+                   sites[idx].flags |= VM_KERN_SITE_KMOD;
+               }
+               else
+               {
+                   sites[idx].site   = VM_KERNEL_UNSLIDE(site);
+                   sites[idx].flags |= VM_KERN_SITE_KERNEL;
+               }
+               site = NULL;
+           }
+           else
+           {
+               vm_tag_free_locked(site->tag);
+               site->tag = VM_KERN_MEMORY_NONE;
+               vm_allocation_sites[idx] = NULL;
+               if (!(VM_TAG_UNLOAD & site->flags)) site = NULL;
+           }
+       }
+       lck_spin_unlock(&vm_allocation_sites_lock);
+        if (site) OSKextFreeSite(site);
+    }
+    return (found);
+}
+
+kern_return_t 
+vm_page_diagnose(mach_memory_info_t * sites, unsigned int num_sites)
+{
+    enum                  { kMaxKernelDepth = 1 };
+    vm_map_t                        maps   [kMaxKernelDepth];
+    vm_map_entry_t                  entries[kMaxKernelDepth];
+    vm_map_t                        map;
+    vm_map_entry_t                  entry;
+    vm_object_offset_t              offset;
+    vm_page_t                       page;
+    int                             stackIdx, count;
+    uint64_t                wired_size;
+    uint64_t                wired_managed_size;
+    uint64_t                wired_reserved_size;
+    mach_memory_info_t     * counts;
+
+    bzero(sites, num_sites * sizeof(mach_memory_info_t));
+
+    vm_page_iterate_objects(sites, num_sites, &vm_page_count_object);
+
+    wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
+    wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
+    wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
+
+    assert(num_sites >= (VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT));
+    counts = &sites[VM_KERN_MEMORY_COUNT];
+
+#define SET_COUNT(xcount, xsize, xflags)                       \
+    counts[xcount].site  = (xcount);                   \
+    counts[xcount].size  = (xsize);                    \
+    counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
+
+    SET_COUNT(VM_KERN_COUNT_MANAGED,             ptoa_64(vm_page_pages),        0);
+    SET_COUNT(VM_KERN_COUNT_WIRED,               wired_size,                    0);
+    SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED,       wired_managed_size,            0);
+    SET_COUNT(VM_KERN_COUNT_RESERVED,            wired_reserved_size,           VM_KERN_SITE_WIRED);
+    SET_COUNT(VM_KERN_COUNT_STOLEN,              ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
+    SET_COUNT(VM_KERN_COUNT_LOPAGE,              ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
+
+#define SET_MAP(xcount, xsize, xfree, xlargest)                \
+    counts[xcount].site    = (xcount);                 \
+    counts[xcount].size    = (xsize);                  \
+    counts[xcount].free    = (xfree);                  \
+    counts[xcount].largest = (xlargest);               \
+    counts[xcount].flags   = VM_KERN_SITE_COUNTER;
+
+    vm_map_size_t map_size, map_free, map_largest;
+
+    vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
+
+    vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
+
+    vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);
+
+    map = kernel_map;
+    stackIdx = 0;
+    while (map)
+    {
+       vm_map_lock(map);
+       for (entry = map->hdr.links.next; map; entry = entry->links.next)
+       {
+           if (entry->is_sub_map)
+           {
+               assert(stackIdx < kMaxKernelDepth);
+               maps[stackIdx] = map;
+               entries[stackIdx] = entry;
+               stackIdx++;
+               map = VME_SUBMAP(entry);
+               entry = NULL;
+               break;
+           }
+           if (VME_OBJECT(entry) == kernel_object)
+           {
+               count = 0;
+               vm_object_lock(VME_OBJECT(entry));
+               for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
+               {
+                       page = vm_page_lookup(VME_OBJECT(entry), offset);
+                       if (page && VM_PAGE_WIRED(page)) count++;
+               }
+               vm_object_unlock(VME_OBJECT(entry));
+
+               if (count)
+               {
+                   assert(VME_ALIAS(entry) < num_sites);
+                   sites[VME_ALIAS(entry)].size += ptoa_64(count);
+               }
+           }
+           if (entry == vm_map_last_entry(map))
+           {
+               vm_map_unlock(map);
+               if (!stackIdx) map = NULL;
+               else
+               {
+                   --stackIdx;
+                   map = maps[stackIdx];
+                   entry = entries[stackIdx];
+               }
+           }
+       }
+    }
+
+    process_account(sites, num_sites);
+    
+    return (KERN_SUCCESS);
+}
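+
+/*
+ * A minimal caller sketch, assuming a buffer sized from the same constants the
+ * asserts above rely on (hypothetical caller, not defined in this file):
+ *
+ *     unsigned int num = VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT;
+ *     mach_memory_info_t *info = kalloc(num * sizeof(mach_memory_info_t));
+ *
+ *     if (info && vm_page_diagnose(info, num) == KERN_SUCCESS) {
+ *             // info[0 .. VM_KERN_MEMORY_COUNT) hold per-tag wired sizes,
+ *             // the remaining entries hold the VM_KERN_COUNT_* counters.
+ *     }
+ */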