X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0b4c1975fb5e4eccf1012a35081f7e7799b81046..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/vm/vm_resident.c

diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c
index dae49ac1a..25c1edb26 100644
--- a/osfmk/vm/vm_resident.c
+++ b/osfmk/vm/vm_resident.c
@@ -64,6 +64,7 @@
 
 #include <debug.h>
 #include <libkern/OSAtomic.h>
+#include <libkern/OSDebug.h>
 
 #include <mach/clock_types.h>
 #include <mach/vm_prot.h>
@@ -76,6 +77,7 @@
 #include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/xpr.h>
+#include <kern/ledger.h>
 #include <vm/pmap.h>
 #include <vm/vm_init.h>
 #include <vm/vm_map.h>
@@ -84,28 +86,29 @@
 #include <vm/vm_kern.h>			/* kernel_memory_allocate() */
 #include <kern/misc_protos.h>
 #include <zone_debug.h>
+#include <mach_debug/zone_info.h>
 #include <vm/cpm.h>
-#include <ppc/mappings.h>		/* (BRINGUP) */
-#include <pexpert/pexpert.h>	/* (BRINGUP) */
+#include <pexpert/pexpert.h>
 
 #include <vm/vm_protos.h>
 #include <vm/memory_object.h>
 #include <vm/vm_purgeable_internal.h>
+#include <vm/vm_compressor.h>
 
-#include <IOKit/IOHibernatePrivate.h>
-
-
-#if CONFIG_EMBEDDED
-#include <sys/kern_memorystatus.h>
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
 #endif
 
+#include <IOKit/IOHibernatePrivate.h>
+
 #include <sys/kdebug.h>
 
+boolean_t	hibernate_cleaning_in_progress = FALSE;
 boolean_t	vm_page_free_verify = TRUE;
 
-uint_t		vm_lopage_free_count = 0;
-uint_t		vm_lopage_free_limit = 0;
-uint_t		vm_lopage_lowater    = 0;
+uint32_t	vm_lopage_free_count = 0;
+uint32_t	vm_lopage_free_limit = 0;
+uint32_t	vm_lopage_lowater    = 0;
 boolean_t	vm_lopage_refill = FALSE;
 boolean_t	vm_lopage_needed = FALSE;
 
@@ -120,9 +123,12 @@ struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AG
 
 __private_extern__ void		vm_page_init_lck_grp(void);
 
-static void			vm_page_free_prepare(vm_page_t	page);
+static void		vm_page_free_prepare(vm_page_t	page);
+static vm_page_t	vm_page_grab_fictitious_common(ppnum_t phys_addr);
 
+static void vm_tag_init(void);
 
+uint64_t	vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
 
 /*
  *	Associated with page of user-allocatable memory is a
@@ -137,7 +143,7 @@ static void			vm_page_free_prepare(vm_page_t	page);
 
 vm_offset_t virtual_space_start;
 vm_offset_t virtual_space_end;
-int	vm_page_pages;
+uint32_t	vm_page_pages;
 
 /*
  *	The vm_page_lookup() routine, which provides for fast
@@ -148,7 +154,7 @@ int	vm_page_pages;
  *	or VP, table.]
  */
 typedef struct {
-	vm_page_t	pages;
+	vm_page_packed_t page_list;
 #if	MACH_PAGE_HASH_STATS
 	int		cur_count;		/* current count */
 	int		hi_count;		/* high water mark */
@@ -166,6 +172,18 @@ uint32_t	vm_page_bucket_hash;		/* Basic bucket hash */
 unsigned int	vm_page_bucket_lock_count = 0;		/* How big is array of locks? */
 
 lck_spin_t	*vm_page_bucket_locks;
+lck_spin_t	vm_objects_wired_lock;
+lck_spin_t	vm_allocation_sites_lock;
+
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets;	/* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+extern int not_in_kdp;
 
 
 #if	MACH_PAGE_HASH_STATS
@@ -240,15 +258,13 @@ ppnum_t		vm_page_lowest = 0;
 unsigned int	vm_colors;
 unsigned int    vm_color_mask;			/* mask is == (vm_colors-1) */
 unsigned int	vm_cache_geometry_colors = 0;	/* set by hw dependent code during startup */
+unsigned int	vm_free_magazine_refill_limit = 0;
 queue_head_t	vm_page_queue_free[MAX_COLORS];
-vm_page_t       vm_page_queue_fictitious;
 unsigned int	vm_page_free_wanted;
 unsigned int	vm_page_free_wanted_privileged;
 unsigned int	vm_page_free_count;
 unsigned int	vm_page_fictitious_count;
 
-unsigned int	vm_page_free_count_minimum;	/* debugging */
-
 /*
  *	Occasionally, the virtual memory system uses
  *	resident page structures that do not refer to
@@ -261,6 +277,8 @@ unsigned int	vm_page_free_count_minimum;	/* debugging */
 zone_t	vm_page_zone;
 vm_locks_array_t vm_page_locks;
 decl_lck_mtx_data(,vm_page_alloc_lock)
+lck_mtx_ext_t vm_page_alloc_lock_ext;
+
 unsigned int io_throttle_zero_fill;
 
 unsigned int	vm_page_local_q_count = 0;
@@ -268,6 +286,9 @@ unsigned int	vm_page_local_q_soft_limit = 250;
 unsigned int	vm_page_local_q_hard_limit = 500;
 struct vplq     *vm_page_local_q = NULL;
 
+/* N.B. Guard and fictitious pages must not
+ * be assigned a zero phys_page value.
+ */
 /*
  *	Fictitious pages don't have a physical address,
  *	but we must initialize phys_page to something.
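Fictitious and guard pages are recognized purely by these sentinel phys_page values, so neither may ever be zero (zero would look like a real physical page number). The guard sentinel is (ppnum_t) -2, visible in the next hunk header, and the fictitious sentinel is a similarly impossible value; the invariant is what vm_page_release_fictitious() asserts later in this diff. A small illustrative helper (not in the kernel) showing the same check:

    static void
    assert_fictitious_sentinel(vm_page_t m)
    {
            if (m->fictitious)
                    assert(m->phys_page == vm_page_fictitious_addr ||
                           m->phys_page == vm_page_guard_addr);
    }
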
@@ -292,35 +313,53 @@ ppnum_t vm_page_guard_addr = (ppnum_t) -2;
  *	system (pageout daemon).  These queues are
  *	defined here, but are shared by the pageout
  *	module.  The inactive queue is broken into 
- *	inactive and zf for convenience as the 
+ *	file backed and anonymous for convenience as the 
  *	pageout daemon often assigns a higher 
- *	affinity to zf pages
+ *	importance to anonymous pages (less likely to be picked)
  */
 queue_head_t	vm_page_queue_active;
 queue_head_t	vm_page_queue_inactive;
-queue_head_t	vm_page_queue_zf;	/* inactive memory queue for zero fill */
+queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
 queue_head_t	vm_page_queue_throttled;
 
+queue_head_t	vm_objects_wired;
+
 unsigned int	vm_page_active_count;
 unsigned int	vm_page_inactive_count;
+unsigned int	vm_page_anonymous_count;
 unsigned int	vm_page_throttled_count;
 unsigned int	vm_page_speculative_count;
+
 unsigned int	vm_page_wire_count;
+unsigned int	vm_page_stolen_count;
 unsigned int	vm_page_wire_count_initial;
+unsigned int	vm_page_pages_initial;
 unsigned int	vm_page_gobble_count = 0;
-unsigned int	vm_page_wire_count_warning = 0;
-unsigned int	vm_page_gobble_count_warning = 0;
+
+#define	VM_PAGE_WIRE_COUNT_WARNING	0
+#define VM_PAGE_GOBBLE_COUNT_WARNING	0
 
 unsigned int	vm_page_purgeable_count = 0; /* # of pages purgeable now */
 unsigned int	vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
 uint64_t	vm_page_purged_count = 0;    /* total count of purged pages */
 
+unsigned int	vm_page_xpmapped_external_count = 0;
+unsigned int	vm_page_external_count = 0;
+unsigned int	vm_page_internal_count = 0;
+unsigned int	vm_page_pageable_external_count = 0;
+unsigned int	vm_page_pageable_internal_count = 0;
+
 #if DEVELOPMENT || DEBUG
 unsigned int	vm_page_speculative_recreated = 0;
 unsigned int	vm_page_speculative_created = 0;
 unsigned int	vm_page_speculative_used = 0;
 #endif
 
+queue_head_t    vm_page_queue_cleaned;
+
+unsigned int	vm_page_cleaned_count = 0;
+unsigned int	vm_pageout_enqueued_cleaned = 0;
+
 uint64_t	max_valid_dma_address = 0xffffffffffffffffULL;
 ppnum_t		max_valid_low_ppnum = 0xffffffff;
 
@@ -334,12 +373,13 @@ ppnum_t		max_valid_low_ppnum = 0xffffffff;
 unsigned int	vm_page_free_target = 0;
 unsigned int	vm_page_free_min = 0;
 unsigned int	vm_page_throttle_limit = 0;
-uint32_t	vm_page_creation_throttle = 0;
 unsigned int	vm_page_inactive_target = 0;
+unsigned int	vm_page_anonymous_min = 0;
 unsigned int	vm_page_inactive_min = 0;
 unsigned int	vm_page_free_reserved = 0;
 unsigned int	vm_page_throttle_count = 0;
 
+
 /*
  *	The VM system has a couple of heuristics for deciding
  *	that pages are "uninteresting" and should be placed
@@ -364,7 +404,9 @@ struct vm_page_stats_reusable vm_page_stats_reusable;
 void
 vm_set_page_size(void)
 {
-	page_mask = page_size - 1;
+	page_size  = PAGE_SIZE;
+	page_mask  = PAGE_MASK;
+	page_shift = PAGE_SHIFT;
 
 	if ((page_mask & page_size) != 0)
 		panic("vm_set_page_size: page size not a power of two");
@@ -374,6 +416,8 @@ vm_set_page_size(void)
 			break;
 }
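The power-of-two check relies on a power of two and its mask (size - 1) having no bits in common. A standalone illustration of the same arithmetic, assuming the common 4 KB page:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t page_size  = 4096;                 /* PAGE_SIZE              */
            uint32_t page_mask  = page_size - 1;        /* 0x0fff = PAGE_MASK     */
            uint32_t page_shift = 12;                   /* PAGE_SHIFT             */

            assert((page_mask & page_size) == 0);       /* the power-of-two test above   */
            assert((1u << page_shift) == page_size);    /* shift, mask and size agree    */
            return 0;
    }
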
 
+#define COLOR_GROUPS_TO_STEAL	4
+
 
 /* Called once during startup, once the cache geometry is known.
  */
@@ -399,6 +443,8 @@ vm_page_set_colors( void )
 	
 	vm_colors = n;
 	vm_color_mask = n - 1;
+
+	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
 }
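vm_free_magazine_refill_limit scales with the number of page colors; the color of a free page is just its physical page number masked by vm_color_mask, as vm_page_release() does further down, and the limit caps how many pages vm_page_grab() steals in one visit to the global free list. A worked example, assuming a cache geometry that yields 32 colors:

    #include <stdint.h>

    typedef uint32_t ppnum_t;                   /* physical page number, as in the kernel */
    #define COLOR_GROUPS_TO_STEAL   4

    static unsigned int vm_colors     = 32;     /* example value from the cache geometry  */
    static unsigned int vm_color_mask = 31;     /* vm_colors - 1                           */

    /* refill limit: 32 * 4 = 128 pages per refill of the per-cpu magazine */
    static unsigned int refill_limit  = 32 * COLOR_GROUPS_TO_STEAL;

    /* the free queue a page lands on, as in vm_page_release() */
    static unsigned int
    page_color(ppnum_t phys_page)
    {
            return phys_page & vm_color_mask;
    }
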
 
 
@@ -426,6 +472,9 @@ vm_page_init_lck_grp(void)
 	lck_grp_init(&vm_page_lck_grp_alloc, "vm_page_alloc", &vm_page_lck_grp_attr);
 	lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr);
 	lck_attr_setdefault(&vm_page_lck_attr);
+	lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
+
+	vm_compressor_init_locks();
 }
 
 void
@@ -450,6 +499,8 @@ vm_page_init_local_q()
 			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
 			queue_init(&lq->vpl_queue);
 			lq->vpl_count = 0;
+			lq->vpl_internal_count = 0;
+			lq->vpl_external_count = 0;
 		}
 		vm_page_local_q_count = num_cpus;
 
@@ -458,11 +509,6 @@ vm_page_init_local_q()
 }
 
 
-uint64_t initial_max_mem;
-int initial_wire_count;
-int initial_free_count;
-int initial_lopage_count;
-
 /*
  *	vm_page_bootstrap:
  *
@@ -496,7 +542,7 @@ vm_page_bootstrap(
 	m->pageq.prev = NULL;
 	m->listq.next = NULL;
 	m->listq.prev = NULL;
-	m->next = VM_PAGE_NULL;
+	m->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 
 	m->object = VM_OBJECT_NULL;		/* reset later */
 	m->offset = (vm_object_offset_t) -1;	/* reset later */
@@ -520,6 +566,7 @@ vm_page_bootstrap(
 	m->busy = TRUE;
 	m->wanted = FALSE;
 	m->tabled = FALSE;
+	m->hashed = FALSE;
 	m->fictitious = FALSE;
 	m->pmapped = FALSE;
 	m->wpmapped = FALSE;
@@ -535,16 +582,17 @@ vm_page_bootstrap(
 	m->unusual = FALSE;
 	m->encrypted = FALSE;
 	m->encrypted_cleaning = FALSE;
-	m->list_req_pending = FALSE;
-	m->dump_cleaning = FALSE;
 	m->cs_validated = FALSE;
 	m->cs_tainted = FALSE;
+	m->cs_nx = FALSE;
 	m->no_cache = FALSE;
-	m->zero_fill = FALSE;
 	m->reusable = FALSE;
+	m->slid = FALSE;
+	m->xpmapped = FALSE;
+	m->compressor = FALSE;
+	m->written_by_kernel = FALSE;
 	m->__unused_object_bits = 0;
 
-
 	/*
 	 *	Initialize the page queues.
 	 */
@@ -569,15 +617,19 @@ vm_page_bootstrap(
 		purgeable_queues[i].debug_count_objects = 0;
 #endif
 	};
+	purgeable_nonvolatile_count = 0;
+	queue_init(&purgeable_nonvolatile_queue);
     
 	for (i = 0; i < MAX_COLORS; i++ )
 		queue_init(&vm_page_queue_free[i]);
+
 	queue_init(&vm_lopage_queue_free);
-	vm_page_queue_fictitious = VM_PAGE_NULL;
 	queue_init(&vm_page_queue_active);
 	queue_init(&vm_page_queue_inactive);
+	queue_init(&vm_page_queue_cleaned);
 	queue_init(&vm_page_queue_throttled);
-	queue_init(&vm_page_queue_zf);
+	queue_init(&vm_page_queue_anonymous);
+	queue_init(&vm_objects_wired);
 
 	for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
 	        queue_init(&vm_page_queue_speculative[i].age_q);
@@ -594,9 +646,10 @@ vm_page_bootstrap(
 	/*
 	 *	Steal memory for the map and zone subsystems.
 	 */
-
-	vm_map_steal_memory();
+	kernel_debug_string_simple("zone_steal_memory");
 	zone_steal_memory();
+	kernel_debug_string_simple("vm_map_steal_memory");
+	vm_map_steal_memory();
 
 	/*
 	 *	Allocate (and initialize) the virtual-to-physical
@@ -641,10 +694,36 @@ vm_page_bootstrap(
 	if (vm_page_hash_mask & vm_page_bucket_count)
 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+	/*
+	 * Allocate a decoy set of page buckets, to detect
+	 * any stomping there.
+	 */
+	vm_page_fake_buckets = (vm_page_bucket_t *)
+		pmap_steal_memory(vm_page_bucket_count *
+				  sizeof(vm_page_bucket_t));
+	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+	vm_page_fake_buckets_end =
+		vm_map_round_page((vm_page_fake_buckets_start +
+				   (vm_page_bucket_count *
+				    sizeof (vm_page_bucket_t))),
+				  PAGE_MASK);
+	char *cp;
+	for (cp = (char *)vm_page_fake_buckets_start;
+	     cp < (char *)vm_page_fake_buckets_end;
+	     cp++) {
+		*cp = 0x5a;
+	}
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+	kernel_debug_string_simple("vm_page_buckets");
 	vm_page_buckets = (vm_page_bucket_t *)
 		pmap_steal_memory(vm_page_bucket_count *
 				  sizeof(vm_page_bucket_t));
 
+	kernel_debug_string_simple("vm_page_bucket_locks");
 	vm_page_bucket_locks = (lck_spin_t *)
 		pmap_steal_memory(vm_page_bucket_lock_count *
 				  sizeof(lck_spin_t));
@@ -652,7 +731,7 @@ vm_page_bootstrap(
 	for (i = 0; i < vm_page_bucket_count; i++) {
 		register vm_page_bucket_t *bucket = &vm_page_buckets[i];
 
-		bucket->pages = VM_PAGE_NULL;
+		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 #if     MACH_PAGE_HASH_STATS
 		bucket->cur_count = 0;
 		bucket->hi_count = 0;
@@ -662,6 +741,14 @@ vm_page_bootstrap(
 	for (i = 0; i < vm_page_bucket_lock_count; i++)
 	        lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
 
+	lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+	lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+	vm_tag_init();
+
+#if VM_PAGE_BUCKETS_CHECK
+	vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
 	/*
 	 *	Machine-dependent code allocates the resident page table.
 	 *	It uses vm_page_init to initialize the page frames.
@@ -670,6 +757,7 @@ vm_page_bootstrap(
 	 *	to get the alignment right.
 	 */
 
+	kernel_debug_string_simple("pmap_startup");
 	pmap_startup(&virtual_space_start, &virtual_space_end);
 	virtual_space_start = round_page(virtual_space_start);
 	virtual_space_end = trunc_page(virtual_space_end);
@@ -687,16 +775,12 @@ vm_page_bootstrap(
 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count;	/* initial value */
 	vm_page_wire_count_initial = vm_page_wire_count;
-	vm_page_free_count_minimum = vm_page_free_count;
-
-	initial_max_mem = max_mem;
-	initial_wire_count = vm_page_wire_count;
-	initial_free_count = vm_page_free_count;
-	initial_lopage_count = vm_lopage_free_count;
+	vm_page_pages_initial = vm_page_pages;
 
 	printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
 	       vm_page_free_count, vm_page_wire_count);
 
+	kernel_debug_string_simple("vm_page_bootstrap complete");
 	simple_lock_init(&vm_paging_lock, 0);
 }
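The initial wire count is simply everything that did not land on a free queue: total managed pages minus the free and lopage-free pages. A worked example with round numbers, assuming an 8 GB configuration and 4 KB pages:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t max_mem  = 8ULL << 30;              /* assumed 8 GB of DRAM          */
            uint64_t total    = max_mem / 4096;          /* atop_64(max_mem) = 2,097,152  */
            uint64_t free_cnt = 2050000;                 /* example vm_page_free_count    */
            uint64_t lo_free  = 1000;                    /* example vm_lopage_free_count  */

            /* same expression as vm_page_bootstrap() above: ~46k pages (~180 MB)
             * accounted as wired/stolen by early boot allocations */
            uint64_t wired = total - free_cnt - lo_free;
            printf("initial wire count: %llu pages\n", (unsigned long long) wired);
            return 0;
    }
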
 
@@ -743,7 +827,7 @@ pmap_steal_memory(
 	addr = virtual_space_start;
 	virtual_space_start += size;
 
-	kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
+	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
 
 	/*
 	 *	Allocate and map physical pages to back new virtual pages.
@@ -765,18 +849,19 @@ pmap_steal_memory(
 #endif
 
 		pmap_enter(kernel_pmap, vaddr, phys_page,
-			   VM_PROT_READ|VM_PROT_WRITE, 
+			   VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE,
 				VM_WIMG_USE_DEFAULT, FALSE);
 		/*
 		 * Account for newly stolen memory
 		 */
 		vm_page_wire_count++;
-
+		vm_page_stolen_count++;
 	}
 
 	return (void *) addr;
 }
 
+void vm_page_release_startup(vm_page_t mem);
 void
 pmap_startup(
 	vm_offset_t *startp,
@@ -786,6 +871,22 @@ pmap_startup(
 	ppnum_t		phys_page;
 	addr64_t	tmpaddr;
 
+
+#if    defined(__LP64__)
+	/*
+	 * struct vm_page must be of size 64 due to VM_PAGE_PACK_PTR use
+	 */
+	assert(sizeof(struct vm_page) == 64);
+
+	/*
+	 * make sure we are aligned on a 64 byte boundary
+	 * for VM_PAGE_PACK_PTR (it clips off the low-order
+	 * 6 bits of the pointer)
+	 */
+	if (virtual_space_start != virtual_space_end)
+		virtual_space_start = round_page(virtual_space_start);
+#endif
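The packing works because every struct vm_page is exactly 64 bytes and 64-byte aligned, so the low 6 bits of any vm_page pointer are always zero and a 32-bit value can cover the whole array. The real VM_PAGE_PACK_PTR / VM_PAGE_UNPACK_PTR macros live in vm_page.h and presumably encode relative to a kernel base address (note the new vm_min_kernel_and_kext_address global earlier in this diff); the sketch below only illustrates the 6-bit shift they depend on:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t vm_page_packed_t;

    /* illustrative only: pack a 64-byte-aligned pointer relative to some base */
    static vm_page_packed_t
    pack_ptr(uintptr_t base, void *p)
    {
            uintptr_t delta = (uintptr_t) p - base;

            assert((delta & 0x3f) == 0);            /* must be 64-byte aligned */
            return (vm_page_packed_t) (delta >> 6);
    }

    static void *
    unpack_ptr(uintptr_t base, vm_page_packed_t v)
    {
            return (void *) (base + ((uintptr_t) v << 6));
    }
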
+
 	/*
 	 *	We calculate how many page frames we will have
 	 *	and then allocate the page structures in one chunk.
@@ -800,6 +901,7 @@ pmap_startup(
 	/*
 	 *	Initialize the page frames.
 	 */
+	kernel_debug_string_simple("Initialize the page frames");
 	for (i = 0, pages_initialized = 0; i < npages; i++) {
 		if (!pmap_next_page(&phys_page))
 			break;
@@ -812,18 +914,38 @@ pmap_startup(
 	}
 	vm_pages_count = pages_initialized;
 
+#if    defined(__LP64__)
+
+	if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0])) != &vm_pages[0])
+		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+
+	if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1])) != &vm_pages[vm_pages_count-1])
+		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
+	kernel_debug_string_simple("page fill/release");
 	/*
 	 * Check if we want to initialize pages to a known value
 	 */
 	fill = 0;								/* Assume no fill */
 	if (PE_parse_boot_argn("fill", &fillval, sizeof (fillval))) fill = 1;			/* Set fill */
-	
+#if	DEBUG
+	/* This slows down booting the DEBUG kernel, particularly on
+	 * large memory systems, but is worthwhile in deterministically
+	 * trapping uninitialized memory usage.
+	 */
+	if (fill == 0) {
+		fill = 1;
+		fillval = 0xDEB8F177;
+	}
+#endif
+	if (fill)
+		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
 	// -debug code remove
 	if (2 == vm_himemory_mode) {
 		// free low -> high so high is preferred
 		for (i = 1; i <= pages_initialized; i++) {
 			if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);		/* Fill the page with a known value if requested at boot */
-			vm_page_release(&vm_pages[i - 1]);
+			vm_page_release_startup(&vm_pages[i - 1]);
 		}
 	}
 	else
@@ -837,9 +959,11 @@ pmap_startup(
 	 */
 	for (i = pages_initialized; i > 0; i--) {
 		if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);		/* Fill the page with a known value if requested at boot */
-		vm_page_release(&vm_pages[i - 1]);
+		vm_page_release_startup(&vm_pages[i - 1]);
 	}
 
+	VM_CHECK_MEMORYSTATUS;
+	
 #if 0
 	{
 		vm_page_t xx, xxo, xxl;
@@ -903,6 +1027,7 @@ pmap_startup(
 void
 vm_page_module_init(void)
 {
+	uint64_t vm_page_zone_pages, vm_page_zone_data_size;
 	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
 			     0, PAGE_SIZE, "vm pages");
 
@@ -910,18 +1035,23 @@ vm_page_module_init(void)
 	zone_debug_disable(vm_page_zone);
 #endif	/* ZONE_DEBUG */
 
+	zone_change(vm_page_zone, Z_CALLERACCT, FALSE);
 	zone_change(vm_page_zone, Z_EXPAND, FALSE);
 	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
 	zone_change(vm_page_zone, Z_FOREIGN, TRUE);
-
-        /*
-         * Adjust zone statistics to account for the real pages allocated
-         * in vm_page_create(). [Q: is this really what we want?]
-         */
-        vm_page_zone->count += vm_page_pages;
-        vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
-
-	lck_mtx_init(&vm_page_alloc_lock, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
+	zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
+	/*
+	 * Adjust zone statistics to account for the real pages allocated
+	 * in vm_page_create(). [Q: is this really what we want?]
+	 */
+	vm_page_zone->count += vm_page_pages;
+	vm_page_zone->sum_count += vm_page_pages;
+	vm_page_zone_data_size = vm_page_pages * vm_page_zone->elem_size;
+	vm_page_zone->cur_size += vm_page_zone_data_size;
+	vm_page_zone_pages = ((round_page(vm_page_zone_data_size)) / PAGE_SIZE);
+	OSAddAtomic64(vm_page_zone_pages, &(vm_page_zone->page_count));
+	/* since zone accounts for these, take them out of stolen */
+	VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
 }
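The new bookkeeping charges the statically created vm_page structures to the zone in whole pages and takes the same amount back out of the stolen count. With the 64-byte element size asserted in pmap_startup() and an assumed 4 KB page, the numbers look like this:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t vm_page_pages = 2000000;    /* example: ~2M managed pages              */
            uint64_t elem_size     = 64;         /* sizeof(struct vm_page), asserted above  */
            uint64_t page_size     = 4096;

            uint64_t data_size  = vm_page_pages * elem_size;                   /* 128,000,000 bytes      */
            uint64_t zone_pages = (data_size + page_size - 1) / page_size;     /* round_page / PAGE_SIZE */

            /* 31,250 pages (~122 MB) move from "stolen" into the vm_page zone's accounting */
            printf("%llu zone pages\n", (unsigned long long) zone_pages);
            return 0;
    }
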
 
 /*
@@ -944,12 +1074,13 @@ vm_page_create(
 	for (phys_page = start;
 	     phys_page < end;
 	     phys_page++) {
-		while ((m = (vm_page_t) vm_page_grab_fictitious())
+		while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page))
 			== VM_PAGE_NULL)
 			vm_page_more_fictitious();
 
-		vm_page_init(m, phys_page, FALSE);
+		m->fictitious = FALSE;
 		pmap_clear_noencrypt(phys_page);
+
 		vm_page_pages++;
 		vm_page_release(m);
 	}
@@ -981,7 +1112,17 @@ vm_page_insert(
 	vm_object_t		object,
 	vm_object_offset_t	offset)
 {
-	vm_page_insert_internal(mem, object, offset, FALSE, TRUE);
+	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
+}
+
+void
+vm_page_insert_wired(
+	vm_page_t		mem,
+	vm_object_t		object,
+	vm_object_offset_t	offset,
+	vm_tag_t                tag)
+{
+	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
 }
 
 void
@@ -989,23 +1130,35 @@ vm_page_insert_internal(
 	vm_page_t		mem,
 	vm_object_t		object,
 	vm_object_offset_t	offset,
+	vm_tag_t                tag,
 	boolean_t		queues_lock_held,
-	boolean_t		insert_in_hash)
+	boolean_t		insert_in_hash,
+	boolean_t		batch_pmap_op,
+        boolean_t               batch_accounting,
+	uint64_t		*delayed_ledger_update)
 {
-	vm_page_bucket_t *bucket;
-	lck_spin_t	*bucket_lock;
-	int	hash_id;
+	vm_page_bucket_t	*bucket;
+	lck_spin_t		*bucket_lock;
+	int			hash_id;
+	task_t			owner;
 
         XPR(XPR_VM_PAGE,
                 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
                 object, offset, mem, 0,0);
-
+#if 0
+	/*
+	 * we may not hold the page queue lock
+	 * so this check isn't safe to make
+	 */
 	VM_PAGE_CHECK(mem);
+#endif
 
-	if (object == vm_submap_object) {
-		/* the vm_submap_object is only a placeholder for submaps */
-		panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset);
-	}
+	assert(page_aligned(offset));
+
+	assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE));
+
+	/* the vm_submap_object is only a placeholder for submaps */
+	assert(object != vm_submap_object);
 
 	vm_object_lock_assert_exclusive(object);
 #if DEBUG
@@ -1013,19 +1166,26 @@ vm_page_insert_internal(
 		       queues_lock_held ? LCK_MTX_ASSERT_OWNED
 		       			: LCK_MTX_ASSERT_NOTOWNED);
 #endif	/* DEBUG */
-	
+
 	if (insert_in_hash == TRUE) {
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
 		if (mem->tabled || mem->object != VM_OBJECT_NULL)
 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
 			      "already in (obj=%p,off=0x%llx)",
 			      mem, object, offset, mem->object, mem->offset);
 #endif
-		assert(!object->internal || offset < object->size);
+		assert(!object->internal || offset < object->vo_size);
 
 		/* only insert "pageout" pages into "pageout" objects,
 		 * and normal pages into normal objects */
+#if 00
+		/*
+		 * For some reason, this assertion gets tripped
+		 * but it's mostly harmless, so let's disable it
+		 * for now.
+		 */
 		assert(object->pageout == mem->pageout);
+#endif /* 00 */
 
 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
 		
@@ -1045,20 +1205,32 @@ vm_page_insert_internal(
 	
 		lck_spin_lock(bucket_lock);
 
-		mem->next = bucket->pages;
-		bucket->pages = mem;
+		mem->next_m = bucket->page_list;
+		bucket->page_list = VM_PAGE_PACK_PTR(mem);
+		assert(mem == VM_PAGE_UNPACK_PTR(bucket->page_list));
+
 #if     MACH_PAGE_HASH_STATS
 		if (++bucket->cur_count > bucket->hi_count)
 			bucket->hi_count = bucket->cur_count;
 #endif /* MACH_PAGE_HASH_STATS */
-
+		mem->hashed = TRUE;
 		lck_spin_unlock(bucket_lock);
 	}
+
+	{	
+		unsigned int    cache_attr;
+
+		cache_attr = object->wimg_bits & VM_WIMG_MASK;
+
+		if (cache_attr != VM_WIMG_USE_DEFAULT) {
+			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
+		}
+	}
 	/*
 	 *	Now link into the object's list of backed pages.
 	 */
-
-	VM_PAGE_INSERT(mem, object);
+	queue_enter(&object->memq, mem, vm_page_t, listq);
+	object->memq_hint = mem;
 	mem->tabled = TRUE;
 
 	/*
@@ -1067,17 +1239,81 @@ vm_page_insert_internal(
 
 	object->resident_page_count++;
 	if (VM_PAGE_WIRED(mem)) {
-		object->wired_page_count++;
+	    if (!mem->private && !mem->fictitious) 
+	    {
+		if (!object->wired_page_count)
+		{
+		    assert(VM_KERN_MEMORY_NONE != tag);
+		    object->wire_tag = tag;
+		    VM_OBJECT_WIRED(object);
+		}
+	    }
+	    object->wired_page_count++;
 	}
 	assert(object->resident_page_count >= object->wired_page_count);
 
+        if (batch_accounting == FALSE) {
+		if (object->internal) {
+			OSAddAtomic(1, &vm_page_internal_count);
+		} else {
+			OSAddAtomic(1, &vm_page_external_count);
+		}
+	}
+
+	/*
+	 * It wouldn't make sense to insert a "reusable" page in
+	 * an object (the page would have been marked "reusable" only
+	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
+	 * in the object at that time).
+	 * But a page could be inserted in a "all_reusable" object, if
+	 * something faults it in (a vm_read() from another task or a
+	 * "use-after-free" issue in user space, for example).  It can
+	 * also happen if we're relocating a page from that object to
+	 * a different physical page during a physically-contiguous
+	 * allocation.
+	 */
 	assert(!mem->reusable);
+	if (mem->object->all_reusable) {
+		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
+	}
+
+	if (object->purgable == VM_PURGABLE_DENY) {
+		owner = TASK_NULL;
+	} else {
+		owner = object->vo_purgeable_owner;
+	}
+	if (owner &&
+	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
+	     VM_PAGE_WIRED(mem))) {
+
+		if (delayed_ledger_update)
+			*delayed_ledger_update += PAGE_SIZE;
+		else {
+			/* more non-volatile bytes */
+			ledger_credit(owner->ledger,
+				      task_ledgers.purgeable_nonvolatile,
+				      PAGE_SIZE);
+			/* more footprint */
+			ledger_credit(owner->ledger,
+				      task_ledgers.phys_footprint,
+				      PAGE_SIZE);
+		}
+
+	} else if (owner &&
+		   (object->purgable == VM_PURGABLE_VOLATILE ||
+		    object->purgable == VM_PURGABLE_EMPTY)) {
+		assert(! VM_PAGE_WIRED(mem));
+		/* more volatile bytes */
+		ledger_credit(owner->ledger,
+			      task_ledgers.purgeable_volatile,
+			      PAGE_SIZE);
+	}
 
 	if (object->purgable == VM_PURGABLE_VOLATILE) {
 		if (VM_PAGE_WIRED(mem)) {
-			OSAddAtomic(1, &vm_page_purgeable_wired_count);
+			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
 		} else {
-			OSAddAtomic(1, &vm_page_purgeable_count);
+			OSAddAtomic(+1, &vm_page_purgeable_count);
 		}
 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
 		   mem->throttled) {
@@ -1095,6 +1331,25 @@ vm_page_insert_internal(
 		if (queues_lock_held == FALSE)
 			vm_page_unlock_queues();
 	}
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+	if (vm_object_tracking_inited &&
+	    object->internal &&
+	    object->resident_page_count == 0 &&
+	    object->pager == NULL &&
+	    object->shadow != NULL &&
+	    object->shadow->copy == object) {
+		void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+		int numsaved = 0;
+
+		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+		btlog_add_entry(vm_object_tracking_btlog,
+				object,
+				VM_OBJECT_TRACKING_OP_MODIFIED,
+				bt,
+				numsaved);
+	}
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
 }
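The ledger updates above boil down to one rule: with a purgeable owner, pages of a non-volatile object (and wired pages of any purgeable object) are charged to purgeable_nonvolatile plus phys_footprint, while pages of volatile or empty objects are charged only to purgeable_volatile. A condensed restatement of the credit path (not a separate kernel routine, and it omits the delayed_ledger_update batching shown above):

    static void
    purgeable_insert_credit(task_t owner, vm_object_t object, vm_page_t mem)
    {
            if (owner == TASK_NULL)
                    return;

            if (object->purgable == VM_PURGABLE_NONVOLATILE || VM_PAGE_WIRED(mem)) {
                    /* more non-volatile bytes, and more footprint */
                    ledger_credit(owner->ledger, task_ledgers.purgeable_nonvolatile, PAGE_SIZE);
                    ledger_credit(owner->ledger, task_ledgers.phys_footprint, PAGE_SIZE);
            } else if (object->purgable == VM_PURGABLE_VOLATILE ||
                       object->purgable == VM_PURGABLE_EMPTY) {
                    /* more volatile bytes, not counted in the footprint */
                    ledger_credit(owner->ledger, task_ledgers.purgeable_volatile, PAGE_SIZE);
            }
    }
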
 
 /*
@@ -1116,9 +1371,15 @@ vm_page_replace(
 	lck_spin_t	*bucket_lock;
 	int		hash_id;
 
+#if 0
+	/*
+	 * we don't hold the page queue lock
+	 * so this check isn't safe to make
+	 */
 	VM_PAGE_CHECK(mem);
+#endif
 	vm_object_lock_assert_exclusive(object);
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
 	if (mem->tabled || mem->object != VM_OBJECT_NULL)
 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
 		      "already in (obj=%p,off=0x%llx)",
@@ -1143,31 +1404,33 @@ vm_page_replace(
 
 	lck_spin_lock(bucket_lock);
 
-	if (bucket->pages) {
-		vm_page_t *mp = &bucket->pages;
-		vm_page_t m = *mp;
+	if (bucket->page_list) {
+		vm_page_packed_t *mp = &bucket->page_list;
+		vm_page_t m = VM_PAGE_UNPACK_PTR(*mp);
 
 		do {
 			if (m->object == object && m->offset == offset) {
 				/*
 				 * Remove old page from hash list
 				 */
-				*mp = m->next;
+				*mp = m->next_m;
+				m->hashed = FALSE;
 
 				found_m = m;
 				break;
 			}
-			mp = &m->next;
-		} while ((m = *mp));
+			mp = &m->next_m;
+		} while ((m = VM_PAGE_UNPACK_PTR(*mp)));
 
-		mem->next = bucket->pages;
+		mem->next_m = bucket->page_list;
 	} else {
-		mem->next = VM_PAGE_NULL;
+		mem->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 	}
 	/*
 	 * insert new page at head of hash list
 	 */
-	bucket->pages = mem;
+	bucket->page_list = VM_PAGE_PACK_PTR(mem);
+	mem->hashed = TRUE;
 
 	lck_spin_unlock(bucket_lock);
 
@@ -1179,7 +1442,7 @@ vm_page_replace(
 		 */
 		vm_page_free_unlocked(found_m, FALSE);
 	}
-	vm_page_insert_internal(mem, object, offset, FALSE, FALSE);
+	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
 }
 
 /*
@@ -1200,6 +1463,7 @@ vm_page_remove(
 	vm_page_t	this;
 	lck_spin_t	*bucket_lock;
 	int		hash_id;
+	task_t		owner;
 
         XPR(XPR_VM_PAGE,
                 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
@@ -1209,8 +1473,14 @@ vm_page_remove(
 	vm_object_lock_assert_exclusive(mem->object);
 	assert(mem->tabled);
 	assert(!mem->cleaning);
+	assert(!mem->laundry);
+#if 0
+	/*
+	 * we don't hold the page queue lock
+	 * so this check isn't safe to make
+	 */
 	VM_PAGE_CHECK(mem);
-
+#endif
 	if (remove_from_hash == TRUE) {
 		/*
 		 *	Remove from the object_object/offset hash table
@@ -1221,30 +1491,30 @@ vm_page_remove(
 
 		lck_spin_lock(bucket_lock);
 
-		if ((this = bucket->pages) == mem) {
+		if ((this = VM_PAGE_UNPACK_PTR(bucket->page_list)) == mem) {
 			/* optimize for common case */
 
-			bucket->pages = mem->next;
+			bucket->page_list = mem->next_m;
 		} else {
-			vm_page_t	*prev;
+			vm_page_packed_t	*prev;
 
-			for (prev = &this->next;
-			     (this = *prev) != mem;
-			     prev = &this->next)
+			for (prev = &this->next_m;
+			     (this = VM_PAGE_UNPACK_PTR(*prev)) != mem;
+			     prev = &this->next_m)
 				continue;
-			*prev = this->next;
+			*prev = this->next_m;
 		}
 #if     MACH_PAGE_HASH_STATS
 		bucket->cur_count--;
 #endif /* MACH_PAGE_HASH_STATS */
-
+		mem->hashed = FALSE;
 		lck_spin_unlock(bucket_lock);
 	}
 	/*
 	 *	Now remove from the object's list of backed pages.
 	 */
 
-	VM_PAGE_REMOVE(mem);
+	vm_page_remove_internal(mem);
 
 	/*
 	 *	And show that the object has one fewer resident
@@ -1253,9 +1523,33 @@ vm_page_remove(
 
 	assert(mem->object->resident_page_count > 0);
 	mem->object->resident_page_count--;
+
+	if (mem->object->internal) {
+#if DEBUG
+		assert(vm_page_internal_count);
+#endif /* DEBUG */
+
+		OSAddAtomic(-1, &vm_page_internal_count);
+	} else {
+		assert(vm_page_external_count);
+		OSAddAtomic(-1, &vm_page_external_count);
+
+		if (mem->xpmapped) {
+			assert(vm_page_xpmapped_external_count);
+			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+		}
+	}
+	if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) {
+		if (mem->object->resident_page_count == 0)
+			vm_object_cache_remove(mem->object);
+	}
+
 	if (VM_PAGE_WIRED(mem)) {
 		assert(mem->object->wired_page_count > 0);
 		mem->object->wired_page_count--;
+		if (!mem->object->wired_page_count) {
+		    VM_OBJECT_UNWIRED(mem->object);
+		}
 	}
 	assert(mem->object->resident_page_count >=
 	       mem->object->wired_page_count);
@@ -1272,6 +1566,31 @@ vm_page_remove(
 		vm_page_stats_reusable.reused_remove++;
 	}
 
+	if (mem->object->purgable == VM_PURGABLE_DENY) {
+		owner = TASK_NULL;
+	} else {
+		owner = mem->object->vo_purgeable_owner;
+	}
+	if (owner &&
+	    (mem->object->purgable == VM_PURGABLE_NONVOLATILE ||
+	     VM_PAGE_WIRED(mem))) {
+		/* less non-volatile bytes */
+		ledger_debit(owner->ledger,
+			     task_ledgers.purgeable_nonvolatile,
+			     PAGE_SIZE);
+		/* less footprint */
+		ledger_debit(owner->ledger,
+			     task_ledgers.phys_footprint,
+			     PAGE_SIZE);
+	} else if (owner &&
+		   (mem->object->purgable == VM_PURGABLE_VOLATILE ||
+		    mem->object->purgable == VM_PURGABLE_EMPTY)) {
+		assert(! VM_PAGE_WIRED(mem));
+		/* less volatile bytes */
+		ledger_debit(owner->ledger,
+			     task_ledgers.purgeable_volatile,
+			     PAGE_SIZE);
+	}
 	if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
 		if (VM_PAGE_WIRED(mem)) {
 			assert(vm_page_purgeable_wired_count > 0);
@@ -1281,6 +1600,9 @@ vm_page_remove(
 			OSAddAtomic(-1, &vm_page_purgeable_count);
 		}
 	}
+	if (mem->object->set_cache_attr == TRUE)
+		pmap_set_cache_attributes(mem->phys_page, 0);
+
 	mem->tabled = FALSE;
 	mem->object = VM_OBJECT_NULL;
 	mem->offset = (vm_object_offset_t) -1;
@@ -1296,13 +1618,55 @@ vm_page_remove(
  *	The object must be locked.  No side effects.
  */
 
-unsigned long vm_page_lookup_hint = 0;
-unsigned long vm_page_lookup_hint_next = 0;
-unsigned long vm_page_lookup_hint_prev = 0;
-unsigned long vm_page_lookup_hint_miss = 0;
-unsigned long vm_page_lookup_bucket_NULL = 0;
-unsigned long vm_page_lookup_miss = 0;
+#define	VM_PAGE_HASH_LOOKUP_THRESHOLD	10
+
+#if DEBUG_VM_PAGE_LOOKUP
+
+struct {
+	uint64_t	vpl_total;
+	uint64_t	vpl_empty_obj;
+	uint64_t	vpl_bucket_NULL;
+	uint64_t	vpl_hit_hint;
+	uint64_t	vpl_hit_hint_next;
+	uint64_t	vpl_hit_hint_prev;
+	uint64_t	vpl_fast;
+	uint64_t	vpl_slow;
+	uint64_t	vpl_hit;
+	uint64_t	vpl_miss;
+
+	uint64_t	vpl_fast_elapsed;
+	uint64_t	vpl_slow_elapsed;
+} vm_page_lookup_stats __attribute__((aligned(8)));
+
+#endif
+
+#define	KDP_VM_PAGE_WALK_MAX	1000
+
+vm_page_t
+kdp_vm_page_lookup(
+	vm_object_t		object,
+	vm_object_offset_t	offset)
+{
+	vm_page_t cur_page;
+	int num_traversed = 0;
+
+	if (not_in_kdp) {
+		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
+	}
+
+	queue_iterate(&object->memq, cur_page, vm_page_t, listq) {
+		if (cur_page->offset == offset) {
+			return cur_page;
+		}
+		num_traversed++;
+
+		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
+			return VM_PAGE_NULL;
+		}
+	}
 
+	return VM_PAGE_NULL;
+}
 
 vm_page_t
 vm_page_lookup(
@@ -1312,18 +1676,32 @@ vm_page_lookup(
 	vm_page_t	mem;
 	vm_page_bucket_t *bucket;
 	queue_entry_t	qe;
-	lck_spin_t	*bucket_lock;
+	lck_spin_t	*bucket_lock = NULL;
 	int		hash_id;
+#if DEBUG_VM_PAGE_LOOKUP
+	uint64_t	start, elapsed;
 
+	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
+#endif
 	vm_object_lock_assert_held(object);
+
+	if (object->resident_page_count == 0) {
+#if DEBUG_VM_PAGE_LOOKUP
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
+#endif
+		return (VM_PAGE_NULL);
+	}
+
 	mem = object->memq_hint;
 
 	if (mem != VM_PAGE_NULL) {
 		assert(mem->object == object);
 
 		if (mem->offset == offset) {
-			vm_page_lookup_hint++;
-			return mem;
+#if DEBUG_VM_PAGE_LOOKUP
+			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
+#endif
+			return (mem);
 		}
 		qe = queue_next(&mem->listq);
 
@@ -1334,9 +1712,11 @@ vm_page_lookup(
 			assert(next_page->object == object);
 
 			if (next_page->offset == offset) {
-				vm_page_lookup_hint_next++;
 				object->memq_hint = next_page; /* new hint */
-				return next_page;
+#if DEBUG_VM_PAGE_LOOKUP
+				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
+#endif
+				return (next_page);
 			}
 		}
 		qe = queue_prev(&mem->listq);
@@ -1348,9 +1728,11 @@ vm_page_lookup(
 			assert(prev_page->object == object);
 
 			if (prev_page->offset == offset) {
-				vm_page_lookup_hint_prev++;
 				object->memq_hint = prev_page; /* new hint */
-				return prev_page;
+#if DEBUG_VM_PAGE_LOOKUP
+				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
+#endif
+				return (prev_page);
 			}
 		}
 	}
@@ -1368,32 +1750,73 @@ vm_page_lookup(
 	 * at outside the scope of the hash bucket lock... this is a 
 	 * really cheap optimization to avoid taking the lock
 	 */
-	if (bucket->pages == VM_PAGE_NULL) {
-	        vm_page_lookup_bucket_NULL++;
-
+	if (!bucket->page_list) {
+#if DEBUG_VM_PAGE_LOOKUP
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
+#endif
 	        return (VM_PAGE_NULL);
 	}
-	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 
-	lck_spin_lock(bucket_lock);
+#if DEBUG_VM_PAGE_LOOKUP
+	start = mach_absolute_time();
+#endif
+	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
+		/*
+		 * on average, it's roughly 3 times faster to run a short memq list
+		 * than to take the spin lock and go through the hash list
+		 */
+		mem = (vm_page_t)queue_first(&object->memq);
 
-	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
-		VM_PAGE_CHECK(mem);
-		if ((mem->object == object) && (mem->offset == offset))
-			break;
+		while (!queue_end(&object->memq, (queue_entry_t)mem)) {
+
+			if (mem->offset == offset)
+				break;
+
+			mem = (vm_page_t)queue_next(&mem->listq);
+		}
+		if (queue_end(&object->memq, (queue_entry_t)mem))
+			mem = NULL;
+	} else {
+
+		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
+
+		lck_spin_lock(bucket_lock);
+
+		for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = VM_PAGE_UNPACK_PTR(mem->next_m)) {
+#if 0
+			/*
+			 * we don't hold the page queue lock
+			 * so this check isn't safe to make
+			 */
+			VM_PAGE_CHECK(mem);
+#endif
+			if ((mem->object == object) && (mem->offset == offset))
+				break;
+		}
+		lck_spin_unlock(bucket_lock);
 	}
-	lck_spin_unlock(bucket_lock);
 
+#if DEBUG_VM_PAGE_LOOKUP
+	elapsed = mach_absolute_time() - start;
+
+	if (bucket_lock) {
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
+		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
+	} else {
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
+		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
+	}
+	if (mem != VM_PAGE_NULL)
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
+	else
+	        OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
+#endif
 	if (mem != VM_PAGE_NULL) {
-		if (object->memq_hint != VM_PAGE_NULL) {
-			vm_page_lookup_hint_miss++;
-		}
 		assert(mem->object == object);
-		object->memq_hint = mem;
-	} else
-	        vm_page_lookup_miss++;
 
-	return(mem);
+		object->memq_hint = mem;
+	}
+	return (mem);
 }
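With DEBUG_VM_PAGE_LOOKUP enabled, the vpl_* counters make the "roughly 3 times faster" claim measurable: divide the accumulated mach_absolute_time() deltas by the number of lookups taken down each path. A hypothetical reporting helper, using the stats struct declared above and the standard absolutetime_to_nanoseconds() conversion:

    static void
    vm_page_lookup_stats_report(void)
    {
            uint64_t fast_ns, slow_ns;

            absolutetime_to_nanoseconds(vm_page_lookup_stats.vpl_fast_elapsed, &fast_ns);
            absolutetime_to_nanoseconds(vm_page_lookup_stats.vpl_slow_elapsed, &slow_ns);

            if (vm_page_lookup_stats.vpl_fast && vm_page_lookup_stats.vpl_slow)
                    printf("memq walk: %llu ns/lookup, hash list: %llu ns/lookup\n",
                           fast_ns / vm_page_lookup_stats.vpl_fast,
                           slow_ns / vm_page_lookup_stats.vpl_slow);
    }
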
 
 
@@ -1412,8 +1835,13 @@ vm_page_rename(
 	vm_object_offset_t		new_offset,
 	boolean_t			encrypted_ok)
 {
+	boolean_t internal_to_external, external_to_internal;
+	vm_tag_t  tag;
+
 	assert(mem->object != new_object);
 
+        assert(mem->object);
+
 	/*
 	 * ENCRYPTED SWAP:
 	 * The encryption key is based on the page's memory object
@@ -1442,8 +1870,39 @@ vm_page_rename(
 	 */
 	vm_page_lockspin_queues();
 
+	internal_to_external = FALSE;
+	external_to_internal = FALSE;
+
+	if (mem->local) {
+		/*
+		 * it's much easier to get the vm_page_pageable_xxx accounting correct
+		 * if we first move the page to the active queue... it's going to end
+		 * up there anyway, and we don't do vm_page_rename's frequently enough
+		 * for this to matter.
+		 */
+		vm_page_queues_remove(mem);
+		vm_page_activate(mem);
+	}
+	if (mem->active || mem->inactive || mem->speculative) {
+		if (mem->object->internal && !new_object->internal) {
+			internal_to_external = TRUE;
+		}
+		if (!mem->object->internal && new_object->internal) {
+			external_to_internal = TRUE;
+		}
+	}
+
+	tag = mem->object->wire_tag;
     	vm_page_remove(mem, TRUE);
-	vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE);
+	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
+
+	if (internal_to_external) {
+		vm_page_pageable_internal_count--;
+		vm_page_pageable_external_count++;
+	} else if (external_to_internal) {
+		vm_page_pageable_external_count--;
+		vm_page_pageable_internal_count++;
+	}
 
 	vm_page_unlock_queues();
 }
@@ -1463,8 +1922,34 @@ vm_page_init(
 {
 	assert(phys_page);
 
+#if	DEBUG
+	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
+		if (!(pmap_valid_page(phys_page))) {
+			panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page);
+		}
+	}
+#endif
 	*mem = vm_page_template;
 	mem->phys_page = phys_page;
+#if 0
+	/*
+	 * we're leaving this turned off for now... currently pages
+	 * come off the free list and are either immediately dirtied/referenced
+	 * due to zero-fill or COW faults, or are used to read or write files...
+	 * in the file I/O case, the UPL mechanism takes care of clearing
+	 * the state of the HW ref/mod bits in a somewhat fragile way.
+	 * Since we may change the way this works in the future (to toughen it up),
+	 * I'm leaving this as a reminder of where these bits could get cleared
+	 */
+
+	/*
+	 * make sure both the h/w referenced and modified bits are
+	 * clear at this point... we are especially dependent on 
+	 * not finding a 'stale' h/w modified in a number of spots
+	 * once this page goes back into use
+	 */
+	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+#endif
 	mem->lopage = lopage;
 }
 
@@ -1475,24 +1960,25 @@ vm_page_init(
  *	Returns VM_PAGE_NULL if there are no free pages.
  */
 int	c_vm_page_grab_fictitious = 0;
+int	c_vm_page_grab_fictitious_failed = 0;
 int	c_vm_page_release_fictitious = 0;
 int	c_vm_page_more_fictitious = 0;
 
-extern vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr);
-
 vm_page_t
 vm_page_grab_fictitious_common(
 	ppnum_t phys_addr)
 {
-	register vm_page_t m;
+	vm_page_t	m;
+
+	if ((m = (vm_page_t)zget(vm_page_zone))) {
 
-	m = (vm_page_t)zget(vm_page_zone);
-	if (m) {
 		vm_page_init(m, phys_addr, FALSE);
 		m->fictitious = TRUE;
-	}
 
-	c_vm_page_grab_fictitious++;
+		c_vm_page_grab_fictitious++;
+	} else
+		c_vm_page_grab_fictitious_failed++;
+
 	return m;
 }
 
@@ -1508,35 +1994,30 @@ vm_page_grab_guard(void)
 	return vm_page_grab_fictitious_common(vm_page_guard_addr);
 }
 
+
 /*
  *	vm_page_release_fictitious:
  *
- *	Release a fictitious page to the free list.
+ *	Release a fictitious page to the zone pool
  */
-
 void
 vm_page_release_fictitious(
-	register vm_page_t m)
+	vm_page_t m)
 {
 	assert(!m->free);
-	assert(m->busy);
 	assert(m->fictitious);
 	assert(m->phys_page == vm_page_fictitious_addr ||
 	       m->phys_page == vm_page_guard_addr);
 
 	c_vm_page_release_fictitious++;
-#if DEBUG
-	if (m->free)
-		panic("vm_page_release_fictitious");
-#endif
-	m->free = TRUE;
+
 	zfree(vm_page_zone, m);
 }
 
 /*
  *	vm_page_more_fictitious:
  *
- *	Add more fictitious pages to the free list.
+ *	Add more fictitious pages to the zone.
  *	Allowed to block. This routine is way intimate
  *	with the zones code, for several reasons:
  *	1. we need to carve some page structures out of physical
@@ -1550,23 +2031,13 @@ vm_page_release_fictitious(
  *	   permanent allocation of a resource.
  *	3. To smooth allocation humps, we allocate single pages
  *	   with kernel_memory_allocate(), and cram them into the
- *	   zone. This also allows us to initialize the vm_page_t's
- *	   on the way into the zone, so that zget() always returns
- *	   an initialized structure. The zone free element pointer
- *	   and the free page pointer are both the first item in the
- *	   vm_page_t.
- *	4. By having the pages in the zone pre-initialized, we need
- *	   not keep 2 levels of lists. The garbage collector simply
- *	   scans our list, and reduces physical memory usage as it
- *	   sees fit.
+ *	   zone.
  */
 
 void vm_page_more_fictitious(void)
 {
-	register vm_page_t m;
-	vm_offset_t addr;
-	kern_return_t retval;
-	int i;
+	vm_offset_t	addr;
+	kern_return_t	retval;
 
 	c_vm_page_more_fictitious++;
 
@@ -1602,10 +2073,10 @@ void vm_page_more_fictitious(void)
 
 	retval = kernel_memory_allocate(zone_map,
 					&addr, PAGE_SIZE, VM_PROT_ALL,
-					KMA_KOBJECT|KMA_NOPAGEWAIT);
+					KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE);
 	if (retval != KERN_SUCCESS) { 
 		/*
-		 * No page was available. Tell the pageout daemon, drop the
+		 * No page was available. Drop the
 		 * lock to give another thread a chance at it, and
 		 * wait for the pageout daemon to make progress.
 		 */
@@ -1613,18 +2084,9 @@ void vm_page_more_fictitious(void)
 		vm_page_wait(THREAD_UNINT);
 		return;
 	}
-	/*
-	 * Initialize as many vm_page_t's as will fit on this page. This
-	 * depends on the zone code disturbing ONLY the first item of
-	 * each zone element.
-	 */
-	m = (vm_page_t)addr;
-	for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
-		vm_page_init(m, vm_page_fictitious_addr, FALSE);
-		m->fictitious = TRUE;
-		m++;
-	}
-	zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
+
+	zcram(vm_page_zone, addr, PAGE_SIZE);
+
 	lck_mtx_unlock(&vm_page_alloc_lock);
 }
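Callers never assume vm_page_grab_fictitious() succeeds on the first try; they loop, letting vm_page_more_fictitious() replenish the zone (and possibly block in vm_page_wait()). This is the pattern vm_page_create() uses earlier in this diff:

    vm_page_t m;

    while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
            vm_page_more_fictitious();
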
 
@@ -1649,7 +2111,7 @@ vm_pool_low(void)
  * this is an interface to support bring-up of drivers
  * on platforms with physical memory > 4G...
  */
-int		vm_himemory_mode = 0;
+int		vm_himemory_mode = 2;
 
 
 /*
@@ -1708,11 +2170,11 @@ vm_page_grablo(void)
 		vm_lopages_allocated_cpm_success++;
 		vm_page_unlock_queues();
 	}
-	assert(mem->gobbled);
 	assert(mem->busy);
 	assert(!mem->free);
 	assert(!mem->pmapped);
 	assert(!mem->wpmapped);
+	assert(!pmap_is_noencrypt(mem->phys_page));
 
 	mem->pageq.next = NULL;
 	mem->pageq.prev = NULL;
@@ -1720,6 +2182,7 @@ vm_page_grablo(void)
 	return (mem);
 }
 
+
 /*
  *	vm_page_grab:
  *
@@ -1741,8 +2204,6 @@ vm_page_grablo(void)
  *	request from the per-cpu queue.
  */
 
-#define COLOR_GROUPS_TO_STEAL	4
-
 
 vm_page_t
 vm_page_grab( void )
@@ -1756,9 +2217,9 @@ vm_page_grab( void )
 return_page_from_cpu_list:
 	        PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
 	        PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
-		mem->pageq.next = NULL;
 
 	        enable_preemption();
+		mem->pageq.next = NULL;
 
 		assert(mem->listq.next == NULL && mem->listq.prev == NULL);
 		assert(mem->tabled == FALSE);
@@ -1770,6 +2231,11 @@ return_page_from_cpu_list:
 		assert(!mem->encrypted);
 		assert(!mem->pmapped);
 		assert(!mem->wpmapped);
+		assert(!mem->active);
+		assert(!mem->inactive);
+		assert(!mem->throttled);
+		assert(!mem->speculative);
+		assert(!pmap_is_noencrypt(mem->phys_page));
 
 		return mem;
 	}
@@ -1780,19 +2246,18 @@ return_page_from_cpu_list:
 	 *	Optionally produce warnings if the wire or gobble
 	 *	counts exceed some threshold.
 	 */
-	if (vm_page_wire_count_warning > 0
-	    && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
 		printf("mk: vm_page_grab(): high wired page count of %d\n",
 			vm_page_wire_count);
-		assert(vm_page_wire_count < vm_page_wire_count_warning);
 	}
-	if (vm_page_gobble_count_warning > 0
-	    && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
 			vm_page_gobble_count);
-		assert(vm_page_gobble_count < vm_page_gobble_count_warning);
 	}
-
+#endif
 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
 	/*
@@ -1837,17 +2302,17 @@ return_page_from_cpu_list:
 		if (vm_page_free_count <= vm_page_free_reserved)
 		        pages_to_steal = 1;
 		else {
-		        pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors;
-		
-			if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved))
+			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+				pages_to_steal = vm_free_magazine_refill_limit;
+			else
 			        pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
 		}
 		color = PROCESSOR_DATA(current_processor(), start_color);
 		head = tail = NULL;
 
+		vm_page_free_count -= pages_to_steal;
+
 		while (pages_to_steal--) {
-		        if (--vm_page_free_count < vm_page_free_count_minimum)
-			        vm_page_free_count_minimum = vm_page_free_count;
 
 			while (queue_empty(&vm_page_queue_free[color]))
 			        color = (color + 1) & vm_color_mask;
@@ -1859,6 +2324,11 @@ return_page_from_cpu_list:
 			mem->pageq.next = NULL;
 			mem->pageq.prev = NULL;
 
+			assert(!mem->active);
+			assert(!mem->inactive);
+			assert(!mem->throttled);
+			assert(!mem->speculative);			
+
 			color = (color + 1) & vm_color_mask;
 
 			if (head == NULL)
@@ -1867,7 +2337,6 @@ return_page_from_cpu_list:
 			        tail->pageq.next = (queue_t)mem;
 		        tail = mem;
 
-			mem->pageq.prev = NULL;
 			assert(mem->listq.next == NULL && mem->listq.prev == NULL);
 			assert(mem->tabled == FALSE);
 			assert(mem->object == VM_OBJECT_NULL);
@@ -1881,7 +2350,10 @@ return_page_from_cpu_list:
 			assert(!mem->encrypted);
 			assert(!mem->pmapped);
 			assert(!mem->wpmapped);
+			assert(!pmap_is_noencrypt(mem->phys_page));
 		}
+		lck_mtx_unlock(&vm_page_queue_free_lock);
+
 		PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
 		PROCESSOR_DATA(current_processor(), start_color) = color;
 
@@ -1892,8 +2364,6 @@ return_page_from_cpu_list:
 		mem = head;
 		mem->pageq.next = NULL;
 
-		lck_mtx_unlock(&vm_page_queue_free_lock);
-
 		enable_preemption();
 	}
 	/*
@@ -1907,29 +2377,12 @@ return_page_from_cpu_list:
 	 *	it doesn't really matter.
 	 */
 	if ((vm_page_free_count < vm_page_free_min) ||
-	    ((vm_page_free_count < vm_page_free_target) &&
-	     ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
-	        thread_wakeup((event_t) &vm_page_free_wanted);
-
-#if CONFIG_EMBEDDED
-	{
-	int 	percent_avail;
-
-	/*
-	 * Decide if we need to poke the memorystatus notification thread.
-	 */
-	percent_avail = 
-		(vm_page_active_count + vm_page_inactive_count + 
-		 vm_page_speculative_count + vm_page_free_count +
-		 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-		atop_64(max_mem);
-	if (percent_avail <= (kern_memorystatus_level - 5)) {
-		kern_memorystatus_level = percent_avail;
-		thread_wakeup((event_t)&kern_memorystatus_wakeup);
-	}
-	}
-#endif
+	     ((vm_page_free_count < vm_page_free_target) &&
+	      ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+	         thread_wakeup((event_t) &vm_page_free_wanted);
 
+	VM_CHECK_MEMORYSTATUS;
+	
 //	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */
 
 	return mem;
@@ -1948,28 +2401,22 @@ vm_page_release(
 	unsigned int	color;
 	int	need_wakeup = 0;
 	int	need_priv_wakeup = 0;
-#if 0
-	unsigned int pindex;
-	phys_entry *physent;
 
-	physent = mapping_phys_lookup(mem->phys_page, &pindex);		/* (BRINGUP) */
-	if(physent->ppLink & ppN) {											/* (BRINGUP) */
-		panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
-	}
-	physent->ppLink = physent->ppLink | ppN;							/* (BRINGUP) */
-#endif
+
 	assert(!mem->private && !mem->fictitious);
 	if (vm_page_free_verify) {
 		assert(pmap_verify_free(mem->phys_page));
 	}
 //	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */
 
+	pmap_clear_noencrypt(mem->phys_page);
 
 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
 #if DEBUG
 	if (mem->free)
 		panic("vm_page_release");
 #endif
+
 	assert(mem->busy);
 	assert(!mem->laundry);
 	assert(mem->object == VM_OBJECT_NULL);
@@ -1978,7 +2425,7 @@ vm_page_release(
 	assert(mem->listq.next == NULL &&
 	       mem->listq.prev == NULL);
 	
-	if ((mem->lopage || vm_lopage_refill == TRUE) &&
+	if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
 	    vm_lopage_free_count < vm_lopage_free_limit &&
 	    mem->phys_page < max_valid_low_ppnum) {
 	        /*
@@ -1997,7 +2444,7 @@ vm_page_release(
 
 		mem->lopage = TRUE;
 	} else {	  
-	        mem->lopage = FALSE;
+		mem->lopage = FALSE;
 		mem->free = TRUE;
 
 	        color = mem->phys_page & vm_color_mask;
@@ -2043,25 +2490,33 @@ vm_page_release(
 	else if (need_wakeup)
 		thread_wakeup_one((event_t) &vm_page_free_count);
 
-#if CONFIG_EMBEDDED
-	{
-	int	percent_avail;
+	VM_CHECK_MEMORYSTATUS;
+}
 
-	/*
-	 * Decide if we need to poke the memorystatus notification thread.
-	 * Locking is not a big issue, as only a single thread delivers these.
-	 */
-	percent_avail = 
-		(vm_page_active_count + vm_page_inactive_count + 
-		 vm_page_speculative_count + vm_page_free_count +
-		 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count)  ) * 100 /
-		atop_64(max_mem);
-	if (percent_avail >= (kern_memorystatus_level + 5)) {
-		kern_memorystatus_level = percent_avail;
-		thread_wakeup((event_t)&kern_memorystatus_wakeup);
-	}
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released 
+ * for the first time. Hence, no locking or unnecessary checks are made.
+ * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
+ */
+void
+vm_page_release_startup(
+	register vm_page_t	mem)
+{
+	queue_t	queue_free;
+
+	if (vm_lopage_free_count < vm_lopage_free_limit &&
+	    mem->phys_page < max_valid_low_ppnum) {
+		mem->lopage = TRUE;
+		vm_lopage_free_count++;
+		queue_free = &vm_lopage_queue_free;
+	} else {	  
+		mem->lopage = FALSE;
+		mem->free = TRUE;
+		vm_page_free_count++;
+		queue_free = &vm_page_queue_free[mem->phys_page & vm_color_mask];
 	}
-#endif
+	queue_enter_first(queue_free, mem, vm_page_t, pageq);
 }
 
 /*
@@ -2113,8 +2568,12 @@ vm_page_wait(
 		if (need_wakeup)
 			thread_wakeup((event_t)&vm_page_free_wanted);
 
-		if (wait_result == THREAD_WAITING)
+		if (wait_result == THREAD_WAITING) {
+			VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
+				       vm_page_free_wanted_privileged, vm_page_free_wanted, 0, 0);
 			wait_result = thread_block(THREAD_CONTINUE_NULL);
+			VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
+		}
 
 		return(wait_result == THREAD_AWAKENED);
 	} else {
@@ -2149,24 +2608,6 @@ vm_page_alloc(
 	return(mem);
 }
 
-vm_page_t
-vm_page_alloclo(
-	vm_object_t		object,
-	vm_object_offset_t	offset)
-{
-	register vm_page_t	mem;
-
-	vm_object_lock_assert_exclusive(object);
-	mem = vm_page_grablo();
-	if (mem == VM_PAGE_NULL)
-		return VM_PAGE_NULL;
-
-	vm_page_insert(mem, object, offset);
-
-	return(mem);
-}
-
-
 /*
  *	vm_page_alloc_guard:
  *	
@@ -2196,16 +2637,16 @@ vm_page_alloc_guard(
 counter(unsigned int c_laundry_pages_freed = 0;)
 
 /*
- *	vm_page_free:
+ *	vm_page_free_prepare:
  *
- *	Returns the given page to the free list,
- *	disassociating it with any VM object.
+ *	Removes page from any queue it may be on
+ *	and disassociates it from its VM object.
  *
  *	Object and page queues must be locked prior to entry.
  */
 static void
 vm_page_free_prepare(
-	register vm_page_t	mem)
+	vm_page_t	mem)
 {
 	vm_page_free_prepare_queues(mem);
 	vm_page_free_prepare_object(mem, TRUE);
@@ -2219,35 +2660,69 @@ vm_page_free_prepare_queues(
 	VM_PAGE_CHECK(mem);
 	assert(!mem->free);
 	assert(!mem->cleaning);
-	assert(!mem->pageout);
-#if DEBUG
+
+#if MACH_ASSERT || DEBUG
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 	if (mem->free)
 		panic("vm_page_free: freeing page on free list\n");
-#endif
+#endif /* MACH_ASSERT || DEBUG */
 	if (mem->object) {
 		vm_object_lock_assert_exclusive(mem->object);
 	}
-
 	if (mem->laundry) {
 		/*
 		 * We may have to free a page while it's being laundered
 		 * if we lost its pager (due to a forced unmount, for example).
-		 * We need to call vm_pageout_throttle_up() before removing
-		 * the page from its VM object, so that we can find out on
-		 * which pageout queue the page is on.
+		 * We need to call vm_pageout_steal_laundry() before removing
+		 * the page from its VM object, so that we can remove it
+		 * from its pageout queue and adjust the laundry accounting
 		 */
-		vm_pageout_throttle_up(mem);
+		vm_pageout_steal_laundry(mem, TRUE);
 		counter(++c_laundry_pages_freed);
 	}
-	VM_PAGE_QUEUES_REMOVE(mem);	/* clears local/active/inactive/throttled/speculative */
+	
+	vm_page_queues_remove(mem);	/* clears local/active/inactive/throttled/speculative */
 
 	if (VM_PAGE_WIRED(mem)) {
 		if (mem->object) {
 			assert(mem->object->wired_page_count > 0);
 			mem->object->wired_page_count--;
+			if (!mem->object->wired_page_count) {
+			    VM_OBJECT_UNWIRED(mem->object);
+			}
+
 			assert(mem->object->resident_page_count >=
 			       mem->object->wired_page_count);
+
+			if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+				OSAddAtomic(+1, &vm_page_purgeable_count);
+				assert(vm_page_purgeable_wired_count > 0);
+				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
+			}
+			if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+			     mem->object->purgable == VM_PURGABLE_EMPTY) &&
+			    mem->object->vo_purgeable_owner != TASK_NULL) {
+				task_t owner;
+
+				owner = mem->object->vo_purgeable_owner;
+				/*
+				 * While wired, this page was accounted
+				 * as "non-volatile" but it should now
+				 * be accounted as "volatile".
+				 */
+				/* one less "non-volatile"... */
+				ledger_debit(owner->ledger,
+					     task_ledgers.purgeable_nonvolatile,
+					     PAGE_SIZE);
+				/* ... and "phys_footprint" */
+				ledger_debit(owner->ledger,
+					     task_ledgers.phys_footprint,
+					     PAGE_SIZE);
+				/* one more "volatile" */
+				ledger_credit(owner->ledger,
+					      task_ledgers.purgeable_volatile,
+					      PAGE_SIZE);
+			}
 		}
 		if (!mem->private && !mem->fictitious)
 			vm_page_wire_count--;
@@ -2266,10 +2741,6 @@ vm_page_free_prepare_object(
 	vm_page_t	mem,
 	boolean_t	remove_from_hash)
 {
-	if (mem->object) {
-	        vm_object_lock_assert_exclusive(mem->object);
-	}
-
 	if (mem->tabled)
 		vm_page_remove(mem, remove_from_hash);	/* clears tabled, object, offset */
 
@@ -2280,33 +2751,26 @@ vm_page_free_prepare_object(
 		mem->fictitious = TRUE;
 		mem->phys_page = vm_page_fictitious_addr;
 	}
-	if (mem->fictitious) {
-		/* Some of these may be unnecessary */
-		mem->gobbled = FALSE;
-		mem->busy = TRUE;
-		mem->absent = FALSE;
-		mem->error = FALSE;
-		mem->dirty = FALSE;
-		mem->precious = FALSE;
-		mem->reference = FALSE;
-		mem->encrypted = FALSE;
-		mem->encrypted_cleaning = FALSE;
-		mem->pmapped = FALSE;
-		mem->wpmapped = FALSE;
-		mem->reusable = FALSE;
-	} else {
-		if (mem->zero_fill == TRUE)
-		        VM_ZF_COUNT_DECR();
+	if ( !mem->fictitious) {
 		vm_page_init(mem, mem->phys_page, mem->lopage);
 	}
 }
 
 
+/*
+ *	vm_page_free:
+ *
+ *	Returns the given page to the free list,
+ *	disassociating it with any VM object.
+ *
+ *	Object and page queues must be locked prior to entry.
+ */
 void
 vm_page_free(
 	vm_page_t	mem)
 {
 	vm_page_free_prepare(mem);
+
 	if (mem->fictitious) {
 		vm_page_release_fictitious(mem);
 	} else {
@@ -2333,210 +2797,163 @@ vm_page_free_unlocked(
 	}
 }
 
+
 /*
  * Free a list of pages.  The list can be up to several hundred pages,
  * as blocked up by vm_pageout_scan().
  * The big win is not having to take the free list lock once
- * per page.  We sort the incoming pages into n lists, one for
- * each color.
+ * per page.
  */
 void
 vm_page_free_list(
-	vm_page_t	mem,
+	vm_page_t	freeq,
 	boolean_t	prepare_object)
 {
+        vm_page_t	mem;
         vm_page_t	nxt;
-	int		pg_count = 0;
-	int		color;
-	int		inuse_list_head = -1;
+	vm_page_t	local_freeq;
+	int		pg_count;
 
-	queue_head_t	free_list[MAX_COLORS];
-	int		inuse[MAX_COLORS];
+	while (freeq) {
 
-	for (color = 0; color < (signed) vm_colors; color++) {
-		queue_init(&free_list[color]);
-	}
-	
-	while (mem) {
-		assert(!mem->inactive);
-		assert(!mem->active);
-		assert(!mem->throttled);
-		assert(!mem->free);
-		assert(!mem->speculative);
-		assert(!VM_PAGE_WIRED(mem));
-		assert(mem->pageq.prev == NULL);
+		pg_count = 0;
+		local_freeq = VM_PAGE_NULL;
+		mem = freeq;
+
+		/*
+		 * break up the processing into smaller chunks so
+		 * that we can 'pipeline' the pages onto the
+		 * free list w/o introducing too much
+		 * contention on the global free queue lock
+		 */
+		while (mem && pg_count < 64) {
+
+			assert(!mem->inactive);
+			assert(!mem->active);
+			assert(!mem->throttled);
+			assert(!mem->free);
+			assert(!mem->speculative);
+			assert(!VM_PAGE_WIRED(mem));
+			assert(mem->pageq.prev == NULL);
 
-		nxt = (vm_page_t)(mem->pageq.next);
+			nxt = (vm_page_t)(mem->pageq.next);
 		
-		if (prepare_object == TRUE)
-			vm_page_free_prepare_object(mem, TRUE);
+			if (vm_page_free_verify && !mem->fictitious && !mem->private) {
+				assert(pmap_verify_free(mem->phys_page));
+			}
+			if (prepare_object == TRUE)
+				vm_page_free_prepare_object(mem, TRUE);
 
-		if (vm_page_free_verify && !mem->fictitious && !mem->private) {
-			assert(pmap_verify_free(mem->phys_page));
-		}
-		assert(mem->busy);
+			if (!mem->fictitious) {
+				assert(mem->busy);
 
-		if (!mem->fictitious) {
-			if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
-			    vm_lopage_free_count < vm_lopage_free_limit &&
-			    mem->phys_page < max_valid_low_ppnum) {
-				mem->pageq.next = NULL;
-				vm_page_release(mem);
-			} else {
+				if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
+				    vm_lopage_free_count < vm_lopage_free_limit &&
+				    mem->phys_page < max_valid_low_ppnum) {
+					mem->pageq.next = NULL;
+					vm_page_release(mem);
+				} else {
+					/*
+					 * IMPORTANT: we can't set the page "free" here
+					 * because that would make the page eligible for
+					 * a physically-contiguous allocation (see
+					 * vm_page_find_contiguous()) right away (we don't
+					 * hold the vm_page_queue_free lock).  That would
+					 * cause trouble because the page is not actually
+					 * in the free queue yet...
+					 */
+					mem->pageq.next = (queue_entry_t)local_freeq;
+					local_freeq = mem;
+					pg_count++;
 
-			/*
-			 * IMPORTANT: we can't set the page "free" here
-			 * because that would make the page eligible for
-			 * a physically-contiguous allocation (see
-			 * vm_page_find_contiguous()) right away (we don't
-			 * hold the vm_page_queue_free lock).  That would
-			 * cause trouble because the page is not actually
-			 * in the free queue yet...
-			 */
-				color = mem->phys_page & vm_color_mask;
-				if (queue_empty(&free_list[color])) {
-					inuse[color] = inuse_list_head;
-					inuse_list_head = color;
+					pmap_clear_noencrypt(mem->phys_page);
 				}
-				queue_enter_first(&free_list[color],
-						  mem,
-						  vm_page_t,
-						  pageq);
-				pg_count++;
+			} else {
+				assert(mem->phys_page == vm_page_fictitious_addr ||
+				       mem->phys_page == vm_page_guard_addr);
+				vm_page_release_fictitious(mem);
 			}
-		} else {
-			assert(mem->phys_page == vm_page_fictitious_addr ||
-			       mem->phys_page == vm_page_guard_addr);
-		        vm_page_release_fictitious(mem);
+			mem = nxt;
 		}
-		mem = nxt;
-	}
-	if (pg_count) {
-	        unsigned int	avail_free_count;
-		unsigned int	need_wakeup = 0;
-		unsigned int	need_priv_wakeup = 0;
+		freeq = mem;
+
+		if ( (mem = local_freeq) ) {
+			unsigned int	avail_free_count;
+			unsigned int	need_wakeup = 0;
+			unsigned int	need_priv_wakeup = 0;
 	  
-	        lck_mtx_lock_spin(&vm_page_queue_free_lock);
+			lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
-		color = inuse_list_head;
-		
-		while( color != -1 ) {
-			vm_page_t first, last;
-			vm_page_t first_free;
+			while (mem) {
+				int	color;
+
+				nxt = (vm_page_t)(mem->pageq.next);
 
-			/*
-			 * Now that we hold the vm_page_queue_free lock,
-			 * it's safe to mark all pages in our local queue
-			 * as "free"...
-			 */
-			queue_iterate(&free_list[color],
-				      mem,
-				      vm_page_t,
-				      pageq) {
 				assert(!mem->free);
 				assert(mem->busy);
 				mem->free = TRUE;
-			}
 
-			/*
-			 * ... and insert our local queue at the head of
-			 * the global free queue.
-			 */
-			first = (vm_page_t) queue_first(&free_list[color]);
-			last = (vm_page_t) queue_last(&free_list[color]);
-			first_free = (vm_page_t) queue_first(&vm_page_queue_free[color]);
-			if (queue_empty(&vm_page_queue_free[color])) {
-				queue_last(&vm_page_queue_free[color]) =
-					(queue_entry_t) last;
-			} else {
-				queue_prev(&first_free->pageq) =
-					(queue_entry_t) last;
-			}
-			queue_first(&vm_page_queue_free[color]) =
-				(queue_entry_t) first;
-			queue_prev(&first->pageq) =
-				(queue_entry_t) &vm_page_queue_free[color];
-			queue_next(&last->pageq) =
-				(queue_entry_t) first_free;
-
-			/* next color */
-			color = inuse[color];
-		}
-		
-		vm_page_free_count += pg_count;
-		avail_free_count = vm_page_free_count;
-
-		if (vm_page_free_wanted_privileged > 0 &&
-		    avail_free_count > 0) {
-			if (avail_free_count < vm_page_free_wanted_privileged) {
-				need_priv_wakeup = avail_free_count;
-				vm_page_free_wanted_privileged -=
-					avail_free_count;
-				avail_free_count = 0;
-			} else {
-				need_priv_wakeup = vm_page_free_wanted_privileged;
-				vm_page_free_wanted_privileged = 0;
-				avail_free_count -=
-					vm_page_free_wanted_privileged;
+				color = mem->phys_page & vm_color_mask;
+				queue_enter_first(&vm_page_queue_free[color],
+						  mem,
+						  vm_page_t,
+						  pageq);
+				mem = nxt;
 			}
-		}
+			vm_page_free_count += pg_count;
+			avail_free_count = vm_page_free_count;
+
+			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
 
-		if (vm_page_free_wanted > 0 &&
-		    avail_free_count > vm_page_free_reserved) {
-		        unsigned int  available_pages;
+				if (avail_free_count < vm_page_free_wanted_privileged) {
+					need_priv_wakeup = avail_free_count;
+					vm_page_free_wanted_privileged -= avail_free_count;
+					avail_free_count = 0;
+				} else {
+					need_priv_wakeup = vm_page_free_wanted_privileged;
+					vm_page_free_wanted_privileged = 0;
+					avail_free_count -= vm_page_free_wanted_privileged;
+				}
+			}
+			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
+				unsigned int  available_pages;
 
-			available_pages = (avail_free_count -
-					   vm_page_free_reserved);
+				available_pages = avail_free_count - vm_page_free_reserved;
 
-			if (available_pages >= vm_page_free_wanted) {
-				need_wakeup = vm_page_free_wanted;
-			        vm_page_free_wanted = 0;
-			} else {
-				need_wakeup = available_pages;
-				vm_page_free_wanted -= available_pages;
+				if (available_pages >= vm_page_free_wanted) {
+					need_wakeup = vm_page_free_wanted;
+					vm_page_free_wanted = 0;
+				} else {
+					need_wakeup = available_pages;
+					vm_page_free_wanted -= available_pages;
+				}
 			}
-		}
-		lck_mtx_unlock(&vm_page_queue_free_lock);
+			lck_mtx_unlock(&vm_page_queue_free_lock);
 
-		if (need_priv_wakeup != 0) {
-			/*
-			 * There shouldn't be that many VM-privileged threads,
-			 * so let's wake them all up, even if we don't quite
-			 * have enough pages to satisfy them all.
-			 */
-			thread_wakeup((event_t)&vm_page_free_wanted_privileged);
-		}
-		if (need_wakeup != 0 && vm_page_free_wanted == 0) {
-			/*
-			 * We don't expect to have any more waiters
-			 * after this, so let's wake them all up at
-			 * once.
-			 */
-			thread_wakeup((event_t) &vm_page_free_count);
-		} else for (; need_wakeup != 0; need_wakeup--) {
-			/*
-			 * Wake up one waiter per page we just released.
-			 */
-			thread_wakeup_one((event_t) &vm_page_free_count);
-		}
-#if CONFIG_EMBEDDED
-		{
-		int percent_avail;
+			if (need_priv_wakeup != 0) {
+				/*
+				 * There shouldn't be that many VM-privileged threads,
+				 * so let's wake them all up, even if we don't quite
+				 * have enough pages to satisfy them all.
+				 */
+				thread_wakeup((event_t)&vm_page_free_wanted_privileged);
+			}
+			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
+				/*
+				 * We don't expect to have any more waiters
+				 * after this, so let's wake them all up at
+				 * once.
+				 */
+				thread_wakeup((event_t) &vm_page_free_count);
+			} else for (; need_wakeup != 0; need_wakeup--) {
+				/*
+				 * Wake up one waiter per page we just released.
+				 */
+				thread_wakeup_one((event_t) &vm_page_free_count);
+			}
 
-		/*
-		 * Decide if we need to poke the memorystatus notification thread.
-		 */
-		percent_avail = 
-			(vm_page_active_count + vm_page_inactive_count + 
-			 vm_page_speculative_count + vm_page_free_count +
-			 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count)  ) * 100 /
-			atop_64(max_mem);
-		if (percent_avail >= (kern_memorystatus_level + 5)) {
-			kern_memorystatus_level = percent_avail;
-			thread_wakeup((event_t)&kern_memorystatus_wakeup);
+			VM_CHECK_MEMORYSTATUS;
 		}
-		}
-#endif
 	}
 }
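
The loop above frees the incoming list in chunks of 64 pages: each chunk is unthreaded and prepared with no global lock held, and only the final splice onto vm_page_queue_free (and the wakeup accounting) happens under the free-queue lock. A minimal user-space sketch of that batching pattern follows; the node type, CHUNK size and pthread mutex are illustrative stand-ins, not xnu code.

#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static struct node *global_free_list;	/* protected by global_lock */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

#define CHUNK	64	/* bound the work done per lock hold */

/* Free an entire list while taking the global lock only once per chunk. */
static void
free_list_batched(struct node *freeq)
{
	while (freeq != NULL) {
		struct node *local = NULL;
		int count = 0;

		/* pass 1: peel off up to CHUNK nodes, no lock held */
		while (freeq != NULL && count < CHUNK) {
			struct node *n = freeq;

			freeq = n->next;
			n->next = local;	/* push onto the local list */
			local = n;
			count++;
		}
		/* pass 2: splice the local list in under one lock acquisition */
		pthread_mutex_lock(&global_lock);
		while (local != NULL) {
			struct node *n = local;

			local = n->next;
			n->next = global_free_list;
			global_free_list = n;
		}
		pthread_mutex_unlock(&global_lock);
	}
}

The same collect-then-commit shape reappears in vm_page_do_delayed_work later in this diff.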
 
@@ -2550,9 +2967,13 @@ vm_page_free_list(
  *
  *	The page's object and the page queues must be locked.
  */
+
+
 void
 vm_page_wire(
-	register vm_page_t	mem)
+	register vm_page_t mem,
+	vm_tag_t           tag,
+	boolean_t	   check_memorystatus)
 {
 
 //	dbgLog(current_thread(), mem->offset, mem->object, 1);	/* (TEST/DEBUG) */
@@ -2575,10 +2996,26 @@ vm_page_wire(
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
 	if ( !VM_PAGE_WIRED(mem)) {
-		VM_PAGE_QUEUES_REMOVE(mem);
+
+		if (mem->pageout_queue) {
+			mem->pageout = FALSE;
+			vm_pageout_throttle_up(mem);
+		}
+		vm_page_queues_remove(mem);
 
 		if (mem->object) {
+
+			if (!mem->private && !mem->fictitious) {
+				if (!mem->object->wired_page_count) {
+					assert(VM_KERN_MEMORY_NONE != tag);
+					mem->object->wire_tag = tag;
+					VM_OBJECT_WIRED(mem->object);
+				}
+			}
 			mem->object->wired_page_count++;
+
 			assert(mem->object->resident_page_count >=
 			       mem->object->wired_page_count);
 			if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
@@ -2586,6 +3023,25 @@ vm_page_wire(
 				OSAddAtomic(-1, &vm_page_purgeable_count);
 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
 			}
+			if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+			     mem->object->purgable == VM_PURGABLE_EMPTY) &&
+			    mem->object->vo_purgeable_owner != TASK_NULL) {
+				task_t owner;
+
+				owner = mem->object->vo_purgeable_owner;
+				/* less volatile bytes */
+				ledger_debit(owner->ledger,
+					     task_ledgers.purgeable_volatile,
+					     PAGE_SIZE);
+				/* more not-quite-volatile bytes */
+				ledger_credit(owner->ledger,
+					      task_ledgers.purgeable_nonvolatile,
+					      PAGE_SIZE);
+				/* more footprint */
+				ledger_credit(owner->ledger,
+					      task_ledgers.phys_footprint,
+					      PAGE_SIZE);
+			}
 			if (mem->object->all_reusable) {
 				/*
 				 * Wired pages are not counted as "re-usable"
@@ -2611,28 +3067,10 @@ vm_page_wire(
 		if (mem->gobbled)
 			vm_page_gobble_count--;
 		mem->gobbled = FALSE;
-		if (mem->zero_fill == TRUE) {
-			mem->zero_fill = FALSE;
-		        VM_ZF_COUNT_DECR();
-		}
-#if CONFIG_EMBEDDED
-		{
-		int 	percent_avail;
 
-		/*
-		 * Decide if we need to poke the memorystatus notification thread.
-		 */
-		percent_avail = 
-			(vm_page_active_count + vm_page_inactive_count + 
-			 vm_page_speculative_count + vm_page_free_count +
-			 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-			atop_64(max_mem);
-		if (percent_avail <= (kern_memorystatus_level - 5)) {
-			kern_memorystatus_level = percent_avail;
-			thread_wakeup((event_t)&kern_memorystatus_wakeup);
-		}
+		if (check_memorystatus == TRUE) {
+			VM_CHECK_MEMORYSTATUS;
 		}
-#endif
 		/* 
 		 * ENCRYPTED SWAP:
 		 * The page could be encrypted, but
@@ -2648,32 +3086,6 @@ vm_page_wire(
 	VM_PAGE_CHECK(mem);
 }
 
-/*
- *      vm_page_gobble:
- *
- *      Mark this page as consumed by the vm/ipc/xmm subsystems.
- *
- *      Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
- */
-void
-vm_page_gobble(
-        register vm_page_t      mem)
-{
-        vm_page_lockspin_queues();
-        VM_PAGE_CHECK(mem);
-
-	assert(!mem->gobbled);
-	assert( !VM_PAGE_WIRED(mem));
-
-        if (!mem->gobbled && !VM_PAGE_WIRED(mem)) {
-                if (!mem->private && !mem->fictitious)
-                        vm_page_wire_count++;
-        }
-	vm_page_gobble_count++;
-        mem->gobbled = TRUE;
-        vm_page_unlock_queues();
-}
-
 /*
  *	vm_page_unwire:
  *
@@ -2692,16 +3104,21 @@ vm_page_unwire(
 
 	VM_PAGE_CHECK(mem);
 	assert(VM_PAGE_WIRED(mem));
+	assert(!mem->gobbled);
 	assert(mem->object != VM_OBJECT_NULL);
 #if DEBUG
 	vm_object_lock_assert_exclusive(mem->object);
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
 	if (--mem->wire_count == 0) {
-		assert(!mem->private && !mem->fictitious);
-		vm_page_wire_count--;
+		if (!mem->private && !mem->fictitious) {
+			vm_page_wire_count--;
+		}
 		assert(mem->object->wired_page_count > 0);
 		mem->object->wired_page_count--;
+		if (!mem->object->wired_page_count) {
+		    VM_OBJECT_UNWIRED(mem->object);
+		}
 		assert(mem->object->resident_page_count >=
 		       mem->object->wired_page_count);
 		if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
@@ -2709,7 +3126,25 @@ vm_page_unwire(
 			assert(vm_page_purgeable_wired_count > 0);
 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
 		}
-		assert(!mem->laundry);
+		if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+		     mem->object->purgable == VM_PURGABLE_EMPTY) &&
+		    mem->object->vo_purgeable_owner != TASK_NULL) {
+			task_t owner;
+
+			owner = mem->object->vo_purgeable_owner;
+			/* more volatile bytes */
+			ledger_credit(owner->ledger,
+				      task_ledgers.purgeable_volatile,
+				      PAGE_SIZE);
+			/* less not-quite-volatile bytes */
+			ledger_debit(owner->ledger,
+				     task_ledgers.purgeable_nonvolatile,
+				     PAGE_SIZE);
+			/* less footprint */
+			ledger_debit(owner->ledger,
+				     task_ledgers.phys_footprint,
+				     PAGE_SIZE);
+		}
 		assert(mem->object != kernel_object);
 		assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
 
@@ -2720,24 +3155,9 @@ vm_page_unwire(
 				vm_page_activate(mem);
 			}
 		}
-#if CONFIG_EMBEDDED
-		{
-		int 	percent_avail;
 
-		/*
-		 * Decide if we need to poke the memorystatus notification thread.
-		 */
-		percent_avail = 
-			(vm_page_active_count + vm_page_inactive_count + 
-			 vm_page_speculative_count + vm_page_free_count +
-			 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-			atop_64(max_mem);
-		if (percent_avail >= (kern_memorystatus_level + 5)) {
-			kern_memorystatus_level = percent_avail;
-			thread_wakeup((event_t)&kern_memorystatus_wakeup);
-		}
-		}
-#endif
+		VM_CHECK_MEMORYSTATUS;
+		
 	}
 	VM_PAGE_CHECK(mem);
 }
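
Wiring a page of a volatile purgeable object moves its cost from the owner's "volatile" ledger to "nonvolatile" plus physical footprint, and the vm_page_unwire block above reverses exactly those three moves. A toy sketch of that symmetry, with an illustrative ledger struct rather than the kernel's ledger API:

#include <stdint.h>

#define PAGE_SIZE	4096	/* illustrative page size */

/* toy stand-in for the per-task ledger; field names are illustrative only */
struct ledger {
	int64_t purgeable_volatile;
	int64_t purgeable_nonvolatile;
	int64_t phys_footprint;
};

/* wiring a volatile page: it can no longer be reclaimed, so it counts
 * as nonvolatile memory and shows up in the task's footprint */
static void
ledger_wire_page(struct ledger *l)
{
	l->purgeable_volatile    -= PAGE_SIZE;
	l->purgeable_nonvolatile += PAGE_SIZE;
	l->phys_footprint        += PAGE_SIZE;
}

/* unwiring undoes the move exactly, so the ledgers always balance */
static void
ledger_unwire_page(struct ledger *l)
{
	l->purgeable_volatile    += PAGE_SIZE;
	l->purgeable_nonvolatile -= PAGE_SIZE;
	l->phys_footprint        -= PAGE_SIZE;
}
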
@@ -2780,9 +3200,7 @@ vm_page_deactivate_internal(
 	 *	inactive queue.  Note wired pages should not have
 	 *	their reference bit cleared.
 	 */
-
-	if (m->absent && !m->unusual)
-		panic("vm_page_deactivate: %p absent", m);
+	assert ( !(m->absent && !m->unusual));
 
 	if (m->gobbled) {		/* can this happen? */
 		assert( !VM_PAGE_WIRED(m));
@@ -2792,76 +3210,65 @@ vm_page_deactivate_internal(
 		vm_page_gobble_count--;
 		m->gobbled = FALSE;
 	}
-	if (m->private || (VM_PAGE_WIRED(m)))
+	/*
+	 * if this page is currently on the pageout queue, we can't do the
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
+	 * and we can't remove it manually since we would need the object lock
+	 * (which is not required here) to decrement the activity_in_progress
+	 * reference which is held on the object while the page is in the pageout queue...
+	 * just let the normal laundry processing proceed
+	 */
+	if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
 		return;
 
-	if (!m->fictitious && !m->absent && clear_hw_reference == TRUE)
+	if (!m->absent && clear_hw_reference == TRUE)
 		pmap_clear_reference(m->phys_page);
 
 	m->reference = FALSE;
 	m->no_cache = FALSE;
 
 	if (!m->inactive) {
-		VM_PAGE_QUEUES_REMOVE(m);
-
-		assert(!m->laundry);
-		assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+		vm_page_queues_remove(m);
 
-		if (!IP_VALID(memory_manager_default) &&
+		if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
 		    m->dirty && m->object->internal &&
 		    (m->object->purgable == VM_PURGABLE_DENY ||
 		     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
 		     m->object->purgable == VM_PURGABLE_VOLATILE)) {
+			vm_page_check_pageable_safe(m);
 			queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
 			m->throttled = TRUE;
 			vm_page_throttled_count++;
 		} else {
-			if (!m->fictitious && m->object->named && m->object->ref_count == 1) {
+			if (m->object->named && m->object->ref_count == 1) {
 			        vm_page_speculate(m, FALSE);
 #if DEVELOPMENT || DEBUG
 				vm_page_speculative_recreated++;
 #endif
-				return;
 			} else {
-				if (m->zero_fill) {
-					queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
-					vm_zf_queue_count++;
-				} else {
-					queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
-				}
-			}
-			m->inactive = TRUE;
-			if (!m->fictitious) {
-			        vm_page_inactive_count++;
-				token_new_pagecount++;
+				vm_page_enqueue_inactive(m, FALSE);
 			}
 		}
 	}
 }
 
 /*
- *	vm_page_activate:
+ * vm_page_enqueue_cleaned
  *
- *	Put the specified page on the active list (if appropriate).
+ * Put the page on the cleaned queue, mark it cleaned, etc.
+ * Being on the cleaned queue (and having m->clean_queue set)
+ * does ** NOT ** guarantee that the page is clean!
  *
- *	The page queues must be locked.
+ * Call with the queues lock held.
  */
 
-void
-vm_page_activate(
-	register vm_page_t	m)
+void vm_page_enqueue_cleaned(vm_page_t m)
 {
-	VM_PAGE_CHECK(m);
-#ifdef	FIXME_4778297
-	assert(m->object != kernel_object);
-#endif
 	assert(m->phys_page != vm_page_guard_addr);
 #if DEBUG
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
-
-	if (m->absent && !m->unusual)
-		panic("vm_page_activate: %p absent", m);
+	assert( !(m->absent && !m->unusual));
 
 	if (m->gobbled) {
 		assert( !VM_PAGE_WIRED(m));
@@ -2870,37 +3277,106 @@ vm_page_activate(
 		vm_page_gobble_count--;
 		m->gobbled = FALSE;
 	}
-	if (m->private)
+	/*
+	 * if this page is currently on the pageout queue, we can't do the
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
+	 * and we can't remove it manually since we would need the object lock
+	 * (which is not required here) to decrement the activity_in_progress
+	 * reference which is held on the object while the page is in the pageout queue...
+	 * just let the normal laundry processing proceed
+	 */
+	if (m->laundry || m->clean_queue || m->pageout_queue || m->private || m->fictitious)
 		return;
 
-#if DEBUG
-	if (m->active)
-	        panic("vm_page_activate: already active");
-#endif
+	vm_page_queues_remove(m);
 
-	if (m->speculative) {
-		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
-		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
+	vm_page_check_pageable_safe(m);
+	queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
+	m->clean_queue = TRUE;
+	vm_page_cleaned_count++;
+
+	m->inactive = TRUE;
+	vm_page_inactive_count++;
+	if (m->object->internal) {
+		vm_page_pageable_internal_count++;
+	} else {
+		vm_page_pageable_external_count++;
 	}
 
-	VM_PAGE_QUEUES_REMOVE(m);
+	vm_pageout_enqueued_cleaned++;
+}
 
-	if ( !VM_PAGE_WIRED(m)) {
-		assert(!m->laundry);
-		assert(m->pageq.next == NULL && m->pageq.prev == NULL);
-		if (!IP_VALID(memory_manager_default) && 
-		    !m->fictitious && m->dirty && m->object->internal && 
-		    (m->object->purgable == VM_PURGABLE_DENY ||
-		     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
-		     m->object->purgable == VM_PURGABLE_VOLATILE)) {
-			queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
-			m->throttled = TRUE;
+/*
+ *	vm_page_activate:
+ *
+ *	Put the specified page on the active list (if appropriate).
+ *
+ *	The page queues must be locked.
+ */
+
+void
+vm_page_activate(
+	register vm_page_t	m)
+{
+	VM_PAGE_CHECK(m);
+#ifdef	FIXME_4778297
+	assert(m->object != kernel_object);
+#endif
+	assert(m->phys_page != vm_page_guard_addr);
+#if DEBUG
+	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+	assert( !(m->absent && !m->unusual));
+
+	if (m->gobbled) {
+		assert( !VM_PAGE_WIRED(m));
+		if (!m->private && !m->fictitious)
+			vm_page_wire_count--;
+		vm_page_gobble_count--;
+		m->gobbled = FALSE;
+	}
+	/*
+	 * if this page is currently on the pageout queue, we can't do the
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
+	 * and we can't remove it manually since we would need the object lock
+	 * (which is not required here) to decrement the activity_in_progress
+	 * reference which is held on the object while the page is in the pageout queue...
+	 * just let the normal laundry processing proceed
+	 */
+	if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
+		return;
+
+#if DEBUG
+	if (m->active)
+	        panic("vm_page_activate: already active");
+#endif
+
+	if (m->speculative) {
+		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
+		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
+	}
+	
+	vm_page_queues_remove(m);
+
+	if ( !VM_PAGE_WIRED(m)) {
+		vm_page_check_pageable_safe(m);
+		if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && 
+		    m->dirty && m->object->internal && 
+		    (m->object->purgable == VM_PURGABLE_DENY ||
+		     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+		     m->object->purgable == VM_PURGABLE_VOLATILE)) {
+			queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+			m->throttled = TRUE;
 			vm_page_throttled_count++;
 		} else {
 			queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
 			m->active = TRUE;
-			if (!m->fictitious)
-				vm_page_active_count++;
+			vm_page_active_count++;
+			if (m->object->internal) {
+				vm_page_pageable_internal_count++;
+			} else {
+				vm_page_pageable_external_count++;
+			}
 		}
 		m->reference = TRUE;
 		m->no_cache = FALSE;
@@ -2924,16 +3400,26 @@ vm_page_speculate(
         struct vm_speculative_age_q	*aq;
 
 	VM_PAGE_CHECK(m);
-	assert(m->object != kernel_object);
+	vm_page_check_pageable_safe(m);
+
 	assert(m->phys_page != vm_page_guard_addr);
 #if DEBUG
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
+	assert( !(m->absent && !m->unusual));
 
-	if (m->absent && !m->unusual)
-		panic("vm_page_speculate: %p absent", m);
+	/*
+	 * if this page is currently on the pageout queue, we can't do the
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
+	 * and we can't remove it manually since we would need the object lock
+	 * (which is not required here) to decrement the activity_in_progress
+	 * reference which is held on the object while the page is in the pageout queue...
+	 * just let the normal laundry processing proceed
+	 */
+	if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
+		return;
 
-	VM_PAGE_QUEUES_REMOVE(m);		
+	vm_page_queues_remove(m);
 
 	if ( !VM_PAGE_WIRED(m)) {
 	        mach_timespec_t		ts;
@@ -2954,8 +3440,8 @@ vm_page_speculate(
 		        /*
 			 * set the timer to begin a new group
 			 */
-			aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000;
-			aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC;
+			aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+			aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
 
 			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
 		} else {
@@ -2978,8 +3464,8 @@ vm_page_speculate(
 				if (!queue_empty(&aq->age_q))
 				        vm_page_speculate_ageit(aq);
 
-				aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000;
-				aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC;
+				aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+				aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
 
 				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
 			}
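
The age timestamp set above is just the vm_page_speculative_q_age_ms interval split into whole seconds and leftover nanoseconds, which is then added to the current time. The conversion, shown with an illustrative struct in place of mach_timespec_t:

#include <stdint.h>

#define NSEC_PER_USEC	1000ULL

struct ts {
	long sec;
	long nsec;
};

/* split a millisecond interval into whole seconds plus leftover nanoseconds,
 * the same arithmetic the speculative-queue aging code performs;
 * e.g. 2500 ms -> { 2, 500000000 } */
static struct ts
age_interval_from_ms(uint32_t ms)
{
	struct ts t;

	t.sec  = ms / 1000;				/* whole seconds */
	t.nsec = (ms % 1000) * 1000 * NSEC_PER_USEC;	/* remainder as ns */
	return t;
}
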
@@ -2987,8 +3473,15 @@ vm_page_speculate(
 		enqueue_tail(&aq->age_q, &m->pageq);
 		m->speculative = TRUE;
 		vm_page_speculative_count++;
+		if (m->object->internal) {
+			vm_page_pageable_internal_count++;
+		} else {
+			vm_page_pageable_external_count++;
+		}
 
 		if (new == TRUE) {
+			vm_object_lock_assert_exclusive(m->object);
+
 		        m->object->pages_created++;
 #if DEVELOPMENT || DEBUG
 			vm_page_speculative_created++;
@@ -3049,24 +3542,22 @@ vm_page_lru(
 #if DEBUG
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
-	if (m->active || m->reference)
-		return;
-
-	if (m->private || (VM_PAGE_WIRED(m)))
+	/*
+	 * if this page is currently on the pageout queue, we can't do the
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
+	 * and we can't remove it manually since we would need the object lock
+	 * (which is not required here) to decrement the activity_in_progress
+	 * reference which is held on the object while the page is in the pageout queue...
+	 * just let the normal laundry processing proceed
+	 */
+	if (m->laundry || m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
 		return;
 
 	m->no_cache = FALSE;
 
-	VM_PAGE_QUEUES_REMOVE(m);
-
-	assert(!m->laundry);
-	assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+	vm_page_queues_remove(m);
 
-	queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
-	m->inactive = TRUE;
-
-        vm_page_inactive_count++;
-	token_new_pagecount++;
+	vm_page_enqueue_inactive(m, FALSE);
 }
 
 
@@ -3077,8 +3568,14 @@ vm_page_reactivate_all_throttled(void)
 	vm_page_t	first_active;
 	vm_page_t	m;
 	int		extra_active_count;
+	int		extra_internal_count, extra_external_count;
+
+	if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default))
+		return;
 
 	extra_active_count = 0;
+	extra_internal_count = 0;
+	extra_external_count = 0;
 	vm_page_lock_queues();
 	if (! queue_empty(&vm_page_queue_throttled)) {
 		/*
@@ -3091,9 +3588,14 @@ vm_page_reactivate_all_throttled(void)
 			assert(!m->inactive);
 			assert(!m->speculative);
 			assert(!VM_PAGE_WIRED(m));
-			if (!m->fictitious) {
-				extra_active_count++;
+
+			extra_active_count++;
+			if (m->object->internal) {
+				extra_internal_count++;
+			} else {
+				extra_external_count++;
 			}
+
 			m->throttled = FALSE;
 			m->active = TRUE;
 			VM_PAGE_CHECK(m);
@@ -3125,6 +3627,8 @@ vm_page_reactivate_all_throttled(void)
 		 * Adjust the global page counts.
 		 */
 		vm_page_active_count += extra_active_count;
+		vm_page_pageable_internal_count += extra_internal_count;
+		vm_page_pageable_external_count += extra_external_count;
 		vm_page_throttled_count = 0;
 	}
 	assert(vm_page_throttled_count == 0);
@@ -3171,6 +3675,7 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
 
 		queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
 			VM_PAGE_CHECK(m);
+			vm_page_check_pageable_safe(m);
 			assert(m->local);
 			assert(!m->active);
 			assert(!m->inactive);
@@ -3213,7 +3718,11 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
 		 * Adjust the global page counts.
 		 */
 		vm_page_active_count += lq->vpl_count;
+		vm_page_pageable_internal_count += lq->vpl_internal_count;
+		vm_page_pageable_external_count += lq->vpl_external_count;
 		lq->vpl_count = 0;
+		lq->vpl_internal_count = 0;
+		lq->vpl_external_count = 0;
 	}
 	assert(queue_empty(&lq->vpl_queue));
 
@@ -3228,18 +3737,26 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
  *
  *	Zero-fill a part of the page.
  */
+#define PMAP_ZERO_PART_PAGE_IMPLEMENTED
 void
 vm_page_part_zero_fill(
 	vm_page_t	m,
 	vm_offset_t	m_pa,
 	vm_size_t	len)
 {
-	vm_page_t	tmp;
 
+#if 0
+	/*
+	 * we don't hold the page queue lock
+	 * so this check isn't safe to make
+	 */
 	VM_PAGE_CHECK(m);
+#endif
+
 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
 	pmap_zero_part_page(m->phys_page, m_pa, len);
 #else
+	vm_page_t	tmp;
 	while (1) {
        		tmp = vm_page_grab();
 		if (tmp == VM_PAGE_NULL) {
@@ -3274,8 +3791,13 @@ vm_page_zero_fill(
         XPR(XPR_VM_PAGE,
                 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
                 m->object, m->offset, m, 0,0);
-
+#if 0
+	/*
+	 * we don't hold the page queue lock
+	 * so this check isn't safe to make
+	 */
 	VM_PAGE_CHECK(m);
+#endif
 
 //	dbgTrace(0xAEAEAEAE, m->phys_page, 0);		/* (BRINGUP) */
 	pmap_zero_page(m->phys_page);
@@ -3295,9 +3817,14 @@ vm_page_part_copy(
 	vm_offset_t	dst_pa,
 	vm_size_t	len)
 {
+#if 0
+	/*
+	 * we don't hold the page queue lock
+	 * so this check isn't safe to make
+	 */
 	VM_PAGE_CHECK(src_m);
 	VM_PAGE_CHECK(dst_m);
-
+#endif
 	pmap_copy_part_page(src_m->phys_page, src_pa,
 			dst_m->phys_page, dst_pa, len);
 }
@@ -3325,9 +3852,15 @@ vm_page_copy(
         src_m->object, src_m->offset, 
 	dest_m->object, dest_m->offset,
 	0);
-
+#if 0
+	/*
+	 * we don't hold the page queue lock
+	 * so this check isn't safe to make
+	 */
 	VM_PAGE_CHECK(src_m);
 	VM_PAGE_CHECK(dest_m);
+#endif
+	vm_object_lock_assert_held(src_m->object);
 
 	/*
 	 * ENCRYPTED SWAP:
@@ -3351,6 +3884,17 @@ vm_page_copy(
 		vm_page_copy_cs_validations++;
 		vm_page_validate_cs(src_m);
 	}
+
+	if (vm_page_is_slideable(src_m)) {
+		boolean_t was_busy = src_m->busy;
+		src_m->busy = TRUE;
+		(void) vm_page_slide(src_m, 0);
+		assert(src_m->busy);
+		if (!was_busy) {
+			PAGE_WAKEUP_DONE(src_m);
+		}
+	}
+
 	/*
 	 * Propagate the cs_tainted bit to the copy page. Do not propagate
 	 * the cs_validated bit.
@@ -3359,7 +3903,8 @@ vm_page_copy(
 	if (dest_m->cs_tainted) {
 		vm_page_copy_cs_tainted++;
 	}
-
+	dest_m->slid = src_m->slid;
+	dest_m->error = src_m->error; /* sliding src_m might have failed... */
 	pmap_copy_page(src_m->phys_page, dest_m->phys_page);
 }
 
@@ -3371,7 +3916,7 @@ _vm_page_print(
 	printf("vm_page %p: \n", p);
 	printf("  pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
 	printf("  listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
-	printf("  next=%p\n", p->next);
+	printf("  next=%p\n", VM_PAGE_UNPACK_PTR(p->next_m));
 	printf("  object=%p offset=0x%llx\n", p->object, p->offset);
 	printf("  wire_count=%u\n", p->wire_count);
 
@@ -3409,14 +3954,11 @@ _vm_page_print(
 	       (p->unusual ? "" : "!"),
 	       (p->encrypted ? "" : "!"),
 	       (p->encrypted_cleaning ? "" : "!"));
-	printf("  %slist_req_pending, %sdump_cleaning, %scs_validated, %scs_tainted, %sno_cache\n",
-	       (p->list_req_pending ? "" : "!"),
-	       (p->dump_cleaning ? "" : "!"),
+	printf("  %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n",
 	       (p->cs_validated ? "" : "!"),
 	       (p->cs_tainted ? "" : "!"),
+	       (p->cs_nx ? "" : "!"),
 	       (p->no_cache ? "" : "!"));
-	printf("  %szero_fill\n",
-	       (p->zero_fill ? "" : "!"));
 
 	printf("phys_page=0x%x\n", p->phys_page);
 }
@@ -3440,7 +3982,7 @@ vm_page_verify_contiguous(
 		if (m->phys_page != prev_addr + 1) {
 			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
 			       m, (long)prev_addr, m->phys_page);
-			printf("pages %p page_count %d\n", pages, page_count);
+			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
 			panic("vm_page_verify_contiguous:  not contiguous!");
 		}
 		prev_addr = m->phys_page;
@@ -3458,6 +4000,7 @@ vm_page_verify_contiguous(
 /*
  *	Check the free lists for proper length etc.
  */
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
 static unsigned int
 vm_page_verify_free_list(
 	queue_head_t	*vm_page_queue,
@@ -3470,6 +4013,9 @@ vm_page_verify_free_list(
 	vm_page_t	prev_m;
 	boolean_t	found_page;
 
+	if (! vm_page_verify_this_free_list_enabled)
+		return 0;
+
 	found_page = FALSE;
 	npages = 0;
 	prev_m = (vm_page_t) vm_page_queue;
@@ -3477,21 +4023,24 @@ vm_page_verify_free_list(
 		      m,
 		      vm_page_t,
 		      pageq) {
+
 		if (m == look_for_page) {
 			found_page = TRUE;
 		}
 		if ((vm_page_t) m->pageq.prev != prev_m)
 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
 			      color, npages, m, m->pageq.prev, prev_m);
-		if ( ! m->free )
-			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n",
-			      color, npages, m);
 		if ( ! m->busy )
 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
 			      color, npages, m);
-		if ( color != (unsigned int) -1 && (m->phys_page & vm_color_mask) != color)
-			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
-			      color, npages, m, m->phys_page & vm_color_mask, color);
+		if (color != (unsigned int) -1) {
+			if ((m->phys_page & vm_color_mask) != color)
+				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
+				      color, npages, m, m->phys_page & vm_color_mask, color);
+			if ( ! m->free )
+				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n",
+				      color, npages, m);
+		}
 		++npages;
 		prev_m = m;
 	}
@@ -3508,13 +4057,12 @@ vm_page_verify_free_list(
 				if (other_color == color)
 					continue;
 				vm_page_verify_free_list(&vm_page_queue_free[other_color],
-							other_color, look_for_page, FALSE);
+							 other_color, look_for_page, FALSE);
 			}
-			if (color != (unsigned int) -1) {
+			if (color == (unsigned int) -1) {
 				vm_page_verify_free_list(&vm_lopage_queue_free,
 							 (unsigned int) -1, look_for_page, FALSE);
 			}
-
 			panic("vm_page_verify_free_list(color=%u)\n", color);
 		}
 		if (!expect_page && found_page) {
@@ -3525,24 +4073,37 @@ vm_page_verify_free_list(
 	return npages;
 }
 
-static boolean_t vm_page_verify_free_lists_enabled = FALSE;
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
 static void
 vm_page_verify_free_lists( void )
 {
 	unsigned int	color, npages, nlopages;
+	boolean_t	toggle = TRUE;
 
-	if (! vm_page_verify_free_lists_enabled)
+	if (! vm_page_verify_all_free_lists_enabled)
 		return;
 
 	npages = 0;
 
 	lck_mtx_lock(&vm_page_queue_free_lock);
+	
+	if (vm_page_verify_this_free_list_enabled == TRUE) {
+		/*
+		 * This variable has been set globally for extra checking of
+		 * each free list Q. Since we didn't set it, we don't own it
+		 * and we shouldn't toggle it.
+		 */
+		toggle = FALSE;
+	}
+
+	if (toggle == TRUE) {
+		vm_page_verify_this_free_list_enabled = TRUE;
+	}
 
 	for( color = 0; color < vm_colors; color++ ) {
 		npages += vm_page_verify_free_list(&vm_page_queue_free[color],
-						color, VM_PAGE_NULL, FALSE);
+						   color, VM_PAGE_NULL, FALSE);
 	}
-
 	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
 					    (unsigned int) -1,
 					    VM_PAGE_NULL, FALSE);
@@ -3550,6 +4111,11 @@ vm_page_verify_free_lists( void )
 		panic("vm_page_verify_free_lists:  "
 		      "npages %u free_count %d nlopages %u lo_free_count %u",
 		      npages, vm_page_free_count, nlopages, vm_lopage_free_count);
+
+	if (toggle == TRUE) {
+		vm_page_verify_this_free_list_enabled = FALSE;
+	}
+
 	lck_mtx_unlock(&vm_page_queue_free_lock);
 }
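
vm_page_verify_free_lists only enables the per-list check for the duration of its own scan when nothing else has already enabled it globally, so it never clears a setting it does not own. That ownership-aware toggle, reduced to a generic sketch (the mutex and flag names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static bool verify_enabled;	/* may also be turned on globally, e.g. from a debugger */
static pthread_mutex_t verify_lock = PTHREAD_MUTEX_INITIALIZER;

static void
verify_everything(void)
{
	/* stand-in for walking and checking every free list */
}

static void
run_verification_pass(void)
{
	bool toggled_here;

	pthread_mutex_lock(&verify_lock);

	/* take ownership of the flag only if nobody else had set it */
	toggled_here = !verify_enabled;
	if (toggled_here)
		verify_enabled = true;

	verify_everything();

	/* clear the flag only if this pass was the one that set it */
	if (toggled_here)
		verify_enabled = false;

	pthread_mutex_unlock(&verify_lock);
}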
 
@@ -3558,6 +4124,9 @@ vm_page_queues_assert(
 	vm_page_t	mem,
 	int		val)
 {
+#if DEBUG
+	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
 	if (mem->free + mem->active + mem->inactive + mem->speculative +
 	    mem->throttled + mem->pageout_queue > (val)) {
 		_vm_page_print(mem);
@@ -3568,11 +4137,17 @@ vm_page_queues_assert(
 		assert(!mem->inactive);
 		assert(!mem->speculative);
 		assert(!mem->throttled);
+		assert(!mem->pageout_queue);
 	}
 }
 #endif	/* MACH_ASSERT */
 
 
+
+
+
+extern boolean_t (* volatile consider_buffer_cache_collect)(int);
+
 /*
  *	CONTIGUOUS PAGE ALLOCATION
  *
@@ -3655,27 +4230,34 @@ vm_page_find_contiguous(
 	unsigned int	idx_last_contig_page_found = 0;
 	int		free_considered, free_available;
 	int		substitute_needed;
-	boolean_t	wrapped;
+	boolean_t	wrapped, zone_gc_called = FALSE;
 #if DEBUG
 	clock_sec_t	tv_start_sec, tv_end_sec;
 	clock_usec_t	tv_start_usec, tv_end_usec;
 #endif
-#if MACH_ASSERT
+
 	int		yielded = 0;
 	int		dumped_run = 0;
 	int		stolen_pages = 0;
-#endif
+	int		compressed_pages = 0;
+
 
 	if (contig_pages == 0)
 		return VM_PAGE_NULL;
 
+full_scan_again:
+
 #if MACH_ASSERT
 	vm_page_verify_free_lists();
 #endif
 #if DEBUG
 	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
 #endif
+	PAGE_REPLACEMENT_ALLOWED(TRUE);
+
 	vm_page_lock_queues();
+
+
 	lck_mtx_lock(&vm_page_queue_free_lock);
 
 	RESET_STATE_OF_RUN();
@@ -3718,17 +4300,16 @@ retry:
 			/* no more low pages... */
 			break;
 		}
-		if (!npages && ((m->phys_page & pnum_mask) != 0)) {
+		if (!npages && ((m->phys_page & pnum_mask) != 0)) {
 			/*
 			 * not aligned
 			 */
 			RESET_STATE_OF_RUN();
 
 		} else if (VM_PAGE_WIRED(m) || m->gobbled ||
-			   m->encrypted || m->encrypted_cleaning || m->cs_validated || m->cs_tainted ||
-			   m->error || m->absent || m->pageout_queue || m->laundry || m->wanted || m->precious ||
-			   m->cleaning || m->overwriting || m->restart || m->unusual || m->list_req_pending ||
-			   m->pageout) {
+			   m->encrypted_cleaning ||
+			   m->pageout_queue || m->laundry || m->wanted ||
+			   m->cleaning || m->overwriting || m->pageout) {
 			/*
 			 * page is in a transient state
 			 * or a state we don't want to deal
@@ -3737,9 +4318,10 @@ retry:
 			 */
 			RESET_STATE_OF_RUN();
 
-		} else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled) {
+		} else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled && !m->compressor) {
 			/*
 			 * page needs to be on one of our queues
+			 * or it needs to belong to the compressor pool
 			 * in order for it to be stable behind the
 			 * locks we hold at this point...
 			 * if not, don't consider it which
@@ -3790,7 +4372,7 @@ retry:
 				 * into a substitute page.
 				 */
 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
-				if (m->pmapped || m->dirty) {
+				if (m->pmapped || m->dirty || m->precious) {
 					substitute_needed++;
 				}
 #else
@@ -3825,12 +4407,16 @@ retry:
 		}
 did_consider:
 		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
-			
+
+			PAGE_REPLACEMENT_ALLOWED(FALSE);
+
 			lck_mtx_unlock(&vm_page_queue_free_lock);
 			vm_page_unlock_queues();
 
 			mutex_pause(0);
 
+			PAGE_REPLACEMENT_ALLOWED(TRUE);
+
 			vm_page_lock_queues();
 			lck_mtx_lock(&vm_page_queue_free_lock);
 
@@ -3841,9 +4427,9 @@ did_consider:
 			 */
 			free_available = vm_page_free_count - vm_page_free_reserved;
 			considered = 0;
-#if MACH_ASSERT
+
 			yielded++;
-#endif
+
 			goto retry;
 		}
 		considered++;
@@ -3902,8 +4488,7 @@ did_consider:
 
 				color = m1->phys_page & vm_color_mask;
 #if MACH_ASSERT
-				vm_page_verify_free_list(&vm_page_queue_free[color],
-							 color, m1, TRUE);
+				vm_page_verify_free_list(&vm_page_queue_free[color], color, m1, TRUE);
 #endif
 				queue_remove(&vm_page_queue_free[color],
 					     m1,
@@ -3912,8 +4497,7 @@ did_consider:
 				m1->pageq.next = NULL;
 				m1->pageq.prev = NULL;
 #if MACH_ASSERT
-				vm_page_verify_free_list(&vm_page_queue_free[color],
-							 color, VM_PAGE_NULL, FALSE);
+				vm_page_verify_free_list(&vm_page_queue_free[color], color, VM_PAGE_NULL, FALSE);
 #endif
 				/*
 				 * Clear the "free" bit so that this page
@@ -3926,12 +4510,6 @@ did_consider:
 				vm_page_free_count--;
 			}
 		}
-		/*
-		 * adjust global freelist counts
-		 */
-		if (vm_page_free_count < vm_page_free_count_minimum)
-			vm_page_free_count_minimum = vm_page_free_count;
-
 		if( flags & KMA_LOMEM)
 			vm_page_lomem_find_contiguous_last_idx = page_idx;
 		else 
@@ -3957,6 +4535,7 @@ did_consider:
 			m1 = &vm_pages[cur_idx--];
 
 			assert(!m1->free);
+
 			if (m1->object == VM_OBJECT_NULL) {
 				/*
 				 * page has already been removed from
@@ -3968,6 +4547,8 @@ did_consider:
 				assert(!m1->laundry);
 			} else {
 				vm_object_t object;
+				int refmod;
+				boolean_t disconnected, reusable;
 
 				if (abort_run == TRUE)
 					continue;
@@ -3984,9 +4565,9 @@ did_consider:
 				}
 				if (locked_object == VM_OBJECT_NULL || 
 				    (VM_PAGE_WIRED(m1) || m1->gobbled ||
-				     m1->encrypted || m1->encrypted_cleaning || m1->cs_validated || m1->cs_tainted ||
-				     m1->error || m1->absent || m1->pageout_queue || m1->laundry || m1->wanted || m1->precious ||
-				     m1->cleaning || m1->overwriting || m1->restart || m1->unusual || m1->list_req_pending || m1->busy)) {
+				     m1->encrypted_cleaning ||
+				     m1->pageout_queue || m1->laundry || m1->wanted ||
+				     m1->cleaning || m1->overwriting || m1->pageout || m1->busy)) {
 
 					if (locked_object) {
 						vm_object_unlock(locked_object);
@@ -3996,8 +4577,31 @@ did_consider:
 					abort_run = TRUE;
 					continue;
 				}
-				if (m1->pmapped || m1->dirty) {
-					int refmod;
+
+				disconnected = FALSE;
+				reusable = FALSE;
+
+				if ((m1->reusable ||
+				     m1->object->all_reusable) &&
+				    m1->inactive &&
+				    !m1->dirty &&
+				    !m1->reference) {
+					/* reusable page... */
+					refmod = pmap_disconnect(m1->phys_page);
+					disconnected = TRUE;
+					if (refmod == 0) {
+						/*
+						 * ... not reused: can steal
+						 * without relocating contents.
+						 */
+						reusable = TRUE;
+					}
+				}
+
+				if ((m1->pmapped &&
+				     ! reusable) ||
+				    m1->dirty ||
+				    m1->precious) {
 					vm_object_offset_t offset;
 
 					m2 = vm_page_grab();
@@ -4011,19 +4615,81 @@ did_consider:
 						abort_run = TRUE;
 						continue;
 					}
-					if (m1->pmapped)
-						refmod = pmap_disconnect(m1->phys_page);
-					else
-						refmod = 0;
-					vm_page_copy(m1, m2);
-		  
-					m2->reference = m1->reference;
-					m2->dirty     = m1->dirty;
+					if (! disconnected) {
+						if (m1->pmapped)
+							refmod = pmap_disconnect(m1->phys_page);
+						else
+							refmod = 0;
+					}
+
+					/* copy the page's contents */
+					pmap_copy_page(m1->phys_page, m2->phys_page);
+					/* copy the page's state */
+					assert(!VM_PAGE_WIRED(m1));
+					assert(!m1->free);
+					assert(!m1->pageout_queue);
+					assert(!m1->laundry);
+					m2->reference	= m1->reference;
+					assert(!m1->gobbled);
+					assert(!m1->private);
+					m2->no_cache	= m1->no_cache;
+					m2->xpmapped	= 0;
+					assert(!m1->busy);
+					assert(!m1->wanted);
+					assert(!m1->fictitious);
+					m2->pmapped	= m1->pmapped; /* should flush cache ? */
+					m2->wpmapped	= m1->wpmapped;
+					assert(!m1->pageout);
+					m2->absent	= m1->absent;
+					m2->error	= m1->error;
+					m2->dirty	= m1->dirty;
+					assert(!m1->cleaning);
+					m2->precious	= m1->precious;
+					m2->clustered	= m1->clustered;
+					assert(!m1->overwriting);
+					m2->restart	= m1->restart;
+					m2->unusual	= m1->unusual;
+					m2->encrypted	= m1->encrypted;
+					assert(!m1->encrypted_cleaning);
+					m2->cs_validated = m1->cs_validated;
+					m2->cs_tainted	= m1->cs_tainted;
+					m2->cs_nx	= m1->cs_nx;
+
+					/*
+					 * If m1 had really been reusable,
+					 * we would have just stolen it, so
+					 * let's not propagate its "reusable"
+					 * bit and assert that m2 is not
+					 * marked as "reusable".
+					 */
+					// m2->reusable	= m1->reusable;
+					assert(!m2->reusable);
+
+					assert(!m1->lopage);
+					m2->slid	= m1->slid;
+					m2->compressor	= m1->compressor;
+
+					/*
+					 * page may need to be flushed if
+					 * it is marshalled into a UPL
+					 * that is going to be used by a device
+					 * that doesn't support coherency
+					 */
+					m2->written_by_kernel = TRUE;
+
+					/*
+					 * make sure we clear the ref/mod state
+					 * from the pmap layer... else we risk
+					 * inheriting state from the last time
+					 * this page was used...
+					 */
+					pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
 
 					if (refmod & VM_MEM_REFERENCED)
 						m2->reference = TRUE;
-					if (refmod & VM_MEM_MODIFIED)
-						m2->dirty = TRUE;
+					if (refmod & VM_MEM_MODIFIED) {
+						SET_PAGE_DIRTY(m2, TRUE);
+					}
 					offset = m1->offset;
 
 					/*
@@ -4036,25 +4702,31 @@ did_consider:
 					vm_page_free_prepare(m1);
 
 					/*
-					 * make sure we clear the ref/mod state
-					 * from the pmap layer... else we risk
-					 * inheriting state from the last time
-					 * this page was used...
-					 */
-					pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
-					/*
-					 * now put the substitute page on the object
+					 * now put the substitute page
+					 * on the object
 					 */
-					vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE);
+					vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
+
+					if (m2->compressor) {
+						m2->pmapped = TRUE;
+						m2->wpmapped = TRUE;
 
-					if (m2->reference)
-						vm_page_activate(m2);
-					else
-						vm_page_deactivate(m2);
+						PMAP_ENTER(kernel_pmap, m2->offset, m2,
+							   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
 
+						compressed_pages++;
+
+					} else {
+						if (m2->reference)
+							vm_page_activate(m2);
+						else
+							vm_page_deactivate(m2);
+					}
 					PAGE_WAKEUP_DONE(m2);
 
 				} else {
+					assert(!m1->compressor);
+
 					/*
 					 * completely cleans up the state
 					 * of the page so that it is ready
@@ -4064,9 +4736,9 @@ did_consider:
 					 */
 					vm_page_free_prepare(m1);
 				}
-#if MACH_ASSERT
+
 				stolen_pages++;
-#endif
+
 			}
 			m1->pageq.next = (queue_entry_t) m;
 			m1->pageq.prev = NULL;
@@ -4081,9 +4753,9 @@ did_consider:
 			if (m != VM_PAGE_NULL) {
 				vm_page_free_list(m, FALSE);
 			}
-#if MACH_ASSERT
+
 			dumped_run++;
-#endif
+
 			/*
 			 * want the index of the last
 			 * page in this run that was
@@ -4141,6 +4813,8 @@ did_consider:
  		assert(vm_page_verify_contiguous(m, npages));
 	}
 done_scanning:
+	PAGE_REPLACEMENT_ALLOWED(FALSE);
+
 	vm_page_unlock_queues();
 
 #if DEBUG
@@ -4157,16 +4831,33 @@ done_scanning:
 		tv_end_sec -= 1000000;
 	}
 	if (vm_page_find_contig_debug) {
-		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d... scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages\n",
-	       __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
-	       (long)tv_end_sec, tv_end_usec, orig_last_idx,
-	       scanned, yielded, dumped_run, stolen_pages);
+		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
+		       __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+		       (long)tv_end_sec, tv_end_usec, orig_last_idx,
+		       scanned, yielded, dumped_run, stolen_pages, compressed_pages);
 	}
 
 #endif
 #if MACH_ASSERT
 	vm_page_verify_free_lists();
 #endif
+	if (m == NULL && zone_gc_called == FALSE) {
+		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
+		       __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+		       scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
+
+		if (consider_buffer_cache_collect != NULL) {
+			(void)(*consider_buffer_cache_collect)(1);
+		}
+
+		consider_zone_gc(TRUE);
+
+		zone_gc_called = TRUE;
+
+		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
+		goto full_scan_again;
+	}
+
 	return m;
 }
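
When the full scan comes up empty, the code above collects the buffer cache, forces a zone garbage collection, and retries the entire scan exactly once before giving up. The retry discipline, reduced to a generic allocator sketch (both helpers are hypothetical stubs):

#include <stdbool.h>
#include <stddef.h>

/* hypothetical stand-ins for the real scan and for cache/zone reclamation */
static void *
try_allocate_run(size_t npages)
{
	(void)npages;
	return NULL;		/* stub: pretend the scan found nothing */
}

static void
reclaim_cached_memory(void)
{
	/* stub: drop caches, run the allocator's garbage collector */
}

static void *
allocate_run_with_one_retry(size_t npages)
{
	bool reclaimed = false;
	void *run;

retry:
	run = try_allocate_run(npages);

	if (run == NULL && !reclaimed) {
		reclaim_cached_memory();
		reclaimed = true;
		goto retry;		/* one full rescan after reclaiming */
	}
	return run;			/* may still be NULL after the retry */
}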
 
@@ -4185,7 +4876,7 @@ cpm_allocate(
 	vm_page_t		pages;
 	unsigned int		npages;
 
-	if (size % page_size != 0)
+	if (size % PAGE_SIZE != 0)
 		return KERN_INVALID_ARGUMENT;
 
 	npages = (unsigned int) (size / PAGE_SIZE);
@@ -4207,28 +4898,12 @@ cpm_allocate(
 	 * determine need for wakeups
 	 */
 	if ((vm_page_free_count < vm_page_free_min) ||
-	    ((vm_page_free_count < vm_page_free_target) &&
-	     ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
-		thread_wakeup((event_t) &vm_page_free_wanted);
+	     ((vm_page_free_count < vm_page_free_target) &&
+	      ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+	         thread_wakeup((event_t) &vm_page_free_wanted);
 		
-#if CONFIG_EMBEDDED
-	{
-	int			percent_avail;
-
-	/*
-	 * Decide if we need to poke the memorystatus notification thread.
-	 */
-	percent_avail = 
-		(vm_page_active_count + vm_page_inactive_count + 
-		 vm_page_speculative_count + vm_page_free_count +
-		 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count)  ) * 100 /
-		atop_64(max_mem);
-	if (percent_avail <= (kern_memorystatus_level - 5)) {
-		kern_memorystatus_level = percent_avail;
-		thread_wakeup((event_t)&kern_memorystatus_wakeup);
-	}
-	}
-#endif
+	VM_CHECK_MEMORYSTATUS;
+	
 	/*
 	 *	The CPM pages should now be available and
 	 *	ordered by ascending physical address.
@@ -4238,7 +4913,167 @@ cpm_allocate(
 	*list = pages;
 	return KERN_SUCCESS;
 }
+
+
+unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
+
+/*
+ * when working on a 'run' of pages, it is necessary to hold 
+ * the vm_page_queue_lock (a hot global lock) for certain operations
+ * on the page... however, the majority of the work can be done
+ * while merely holding the object lock... in fact there are certain
+ * collections of pages that don't require any work brokered by the
+ * vm_page_queue_lock... to mitigate the time spent behind the global
+ * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
+ * while doing all of the work that doesn't require the vm_page_queue_lock...
+ * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
+ * necessary work for each page... we will grab the busy bit on the page
+ * if it's not already held so that vm_page_do_delayed_work can drop the object lock
+ * if it can't immediately take the vm_page_queue_lock in order to compete
+ * for the locks in the same order that vm_pageout_scan takes them.
+ * the operation names are modeled after the names of the routines that
+ * need to be called in order to make the changes very obvious in the
+ * original loop
+ */
+
+void
+vm_page_do_delayed_work(
+	vm_object_t 	object,
+	vm_tag_t        tag,
+	struct vm_page_delayed_work *dwp,
+	int		dw_count)
+{
+	int		j;
+	vm_page_t	m;
+        vm_page_t       local_free_q = VM_PAGE_NULL;
+
+	/*
+	 * pageout_scan takes the vm_page_lock_queues first
+	 * then tries for the object lock... to avoid what
+	 * is effectively a lock inversion, we'll go to the
+	 * trouble of taking them in that same order... otherwise
+	 * if this object contains the majority of the pages resident
+	 * in the UBC (or a small set of large objects actively being
+	 * worked on contain the majority of the pages), we could
+	 * cause the pageout_scan thread to 'starve' in its attempt
+	 * to find pages to move to the free queue, since it has to
+	 * successfully acquire the object lock of any candidate page
+	 * before it can steal/clean it.
+	 */
+	if (!vm_page_trylockspin_queues()) {
+		vm_object_unlock(object);
+
+		vm_page_lockspin_queues();
+
+		for (j = 0; ; j++) {
+			if (!vm_object_lock_avoid(object) &&
+			    _vm_object_lock_try(object))
+				break;
+			vm_page_unlock_queues();
+			mutex_pause(j);
+			vm_page_lockspin_queues();
+		}
+	}
+	for (j = 0; j < dw_count; j++, dwp++) {
+
+		m = dwp->dw_m;
+
+		if (dwp->dw_mask & DW_vm_pageout_throttle_up)
+			vm_pageout_throttle_up(m);
+#if CONFIG_PHANTOM_CACHE
+		if (dwp->dw_mask & DW_vm_phantom_cache_update)
+			vm_phantom_cache_update(m);
+#endif
+		if (dwp->dw_mask & DW_vm_page_wire)
+			vm_page_wire(m, tag, FALSE);
+		else if (dwp->dw_mask & DW_vm_page_unwire) {
+			boolean_t	queueit;
+
+			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
+
+			vm_page_unwire(m, queueit);
+		}
+		if (dwp->dw_mask & DW_vm_page_free) {
+			vm_page_free_prepare_queues(m);
+
+			assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+			/*
+			 * Add this page to our list of reclaimed pages,
+			 * to be freed later.
+			 */
+			m->pageq.next = (queue_entry_t) local_free_q;
+			local_free_q = m;
+		} else {
+			if (dwp->dw_mask & DW_vm_page_deactivate_internal)
+				vm_page_deactivate_internal(m, FALSE);
+			else if (dwp->dw_mask & DW_vm_page_activate) {
+				if (m->active == FALSE) {
+					vm_page_activate(m);
+				}
+			}
+			else if (dwp->dw_mask & DW_vm_page_speculate)
+				vm_page_speculate(m, TRUE);
+			else if (dwp->dw_mask & DW_enqueue_cleaned) {
+				/*
+				 * if we didn't hold the object lock and did this,
+				 * we might disconnect the page, then someone might
+				 * soft fault it back in, then we would put it on the
+				 * cleaned queue, and so we would have a referenced (maybe even dirty)
+				 * page on that queue, which we don't want
+				 */
+				int refmod_state = pmap_disconnect(m->phys_page);
+
+				if ((refmod_state & VM_MEM_REFERENCED)) {
+					/*
+					 * this page has been touched since it got cleaned; let's activate it
+					 * if it hasn't already been
+					 */
+					vm_pageout_enqueued_cleaned++;
+					vm_pageout_cleaned_reactivated++;
+					vm_pageout_cleaned_commit_reactivated++;
+
+					if (m->active == FALSE)
+						vm_page_activate(m);
+				} else {
+					m->reference = FALSE;
+					vm_page_enqueue_cleaned(m);
+				}
+			}
+			else if (dwp->dw_mask & DW_vm_page_lru)
+				vm_page_lru(m);
+			else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
+				if ( !m->pageout_queue)
+					vm_page_queues_remove(m);
+			}
+			if (dwp->dw_mask & DW_set_reference)
+				m->reference = TRUE;
+			else if (dwp->dw_mask & DW_clear_reference)
+				m->reference = FALSE;
+
+			if (dwp->dw_mask & DW_move_page) {
+				if ( !m->pageout_queue) {
+					vm_page_queues_remove(m);
+
+					assert(m->object != kernel_object);
+
+					vm_page_enqueue_inactive(m, FALSE);
+				}
+			}
+			if (dwp->dw_mask & DW_clear_busy)
+				m->busy = FALSE;
+
+			if (dwp->dw_mask & DW_PAGE_WAKEUP)
+				PAGE_WAKEUP(m);
+		}
+	}
+	vm_page_unlock_queues();
+
+	if (local_free_q)
+		vm_page_free_list(local_free_q, TRUE);
 	
+	VM_CHECK_MEMORYSTATUS;
+
+}
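
vm_page_do_delayed_work is the second half of a two-pass scheme: callers record the work needed for each page in a small array while holding only the object lock, and the whole batch is then replayed under a single acquisition of the page-queue lock. The batch-then-apply shape, reduced to a sketch (the op codes, limit and lock are illustrative, not the kernel's DW_* flags):

#include <pthread.h>

#define DW_LIMIT	32	/* batch size, analogous to the delayed work limit */

struct page;			/* opaque page type for the sketch */

enum dw_op { DW_ACTIVATE, DW_DEACTIVATE, DW_FREE };

struct delayed_work {
	struct page	*m;
	enum dw_op	op;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void
apply_op(struct page *m, enum dw_op op)
{
	(void)m;
	(void)op;
	/* stand-in for the per-page queue manipulation */
}

/* pass 2: take the hot lock once and replay everything recorded in pass 1 */
static void
do_delayed_work(struct delayed_work *dwp, int dw_count)
{
	pthread_mutex_lock(&queue_lock);
	for (int i = 0; i < dw_count; i++)
		apply_op(dwp[i].m, dwp[i].op);
	pthread_mutex_unlock(&queue_lock);
}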
 
 kern_return_t
 vm_page_alloc_list(
@@ -4304,17 +5139,20 @@ vm_page_get_phys_page(vm_page_t page)
 
 static vm_page_t hibernate_gobble_queue;
 
-extern boolean_t (* volatile consider_buffer_cache_collect)(int);
-
 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
-static int  hibernate_flush_dirty_pages(void);
+static int  hibernate_flush_dirty_pages(int);
 static int  hibernate_flush_queue(queue_head_t *, int);
-static void hibernate_dirty_page(vm_page_t);
 
 void hibernate_flush_wait(void);
 void hibernate_mark_in_progress(void);
 void hibernate_clear_in_progress(void);
 
+void		hibernate_free_range(int, int);
+void		hibernate_hash_insert_page(vm_page_t);
+uint32_t	hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
+void		hibernate_rebuild_vm_structs(void);
+uint32_t	hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
+ppnum_t		hibernate_lookup_paddr(unsigned int);
 
 struct hibernate_statistics {
 	int hibernate_considered;
@@ -4323,6 +5161,7 @@ struct hibernate_statistics {
 	int hibernate_skipped_cleaning;
 	int hibernate_skipped_transient;
 	int hibernate_skipped_precious;
+	int hibernate_skipped_external;
 	int hibernate_queue_nolock;
 	int hibernate_queue_paused;
 	int hibernate_throttled;
@@ -4337,15 +5176,25 @@ struct hibernate_statistics {
 	int cd_found_cleaning;
 	int cd_found_laundry;
 	int cd_found_dirty;
+	int cd_found_xpmapped;
+	int cd_skipped_xpmapped;
 	int cd_local_free;
 	int cd_total_free;
 	int cd_vm_page_wire_count;
+	int cd_vm_struct_pages_unneeded;
 	int cd_pages;
 	int cd_discarded;
 	int cd_count_wire;
 } hibernate_stats;
 
 
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define	HIBERNATE_XPMAPPED_LIMIT	40000
+
 
 static int
 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
@@ -4354,7 +5203,7 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
 
 	vm_page_lock_queues();
 
-	while (q->pgo_laundry) {
+	while ( !queue_empty(&q->pgo_pending) ) {
 
 		q->pgo_draining = TRUE;
 
@@ -4364,8 +5213,12 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
 
 		wait_result = thread_block(THREAD_CONTINUE_NULL);
 
-		if (wait_result == THREAD_TIMED_OUT) {
+		if (wait_result == THREAD_TIMED_OUT && !queue_empty(&q->pgo_pending)) {
 			hibernate_stats.hibernate_drain_timeout++;
+			
+			if (q == &vm_pageout_queue_external)
+				return (0);
+			
 			return (1);
 		}
 		vm_page_lock_queues();
@@ -4377,46 +5230,8 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
 	return (0);
 }
 
-static void
-hibernate_dirty_page(vm_page_t m)
-{
-	vm_object_t	object = m->object;
-        struct		vm_pageout_queue *q;
-
-#if DEBUG
-	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
-	vm_object_lock_assert_exclusive(object);
-
-	/*
-	 * protect the object from collapse - 
-	 * locking in the object's paging_offset.
-	 */
-	vm_object_paging_begin(object);
-
-	m->list_req_pending = TRUE;
-	m->cleaning = TRUE;
-	m->busy = TRUE;
-
-	if (object->internal == TRUE)
-	        q = &vm_pageout_queue_internal;
-	else
-	        q = &vm_pageout_queue_external;
-
-        /* 
-	 * pgo_laundry count is tied to the laundry bit
-	 */
-	m->laundry = TRUE;
-	q->pgo_laundry++;
 
-	m->pageout_queue = TRUE;
-	queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
-	
-	if (q->pgo_idle == TRUE) {
-	        q->pgo_idle = FALSE;
-	        thread_wakeup((event_t) &q->pgo_pending);
-	}
-}
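+/*
+ * set when the external pageout queue stops making progress (throttle
+ * timeout in hibernate_flush_queue) so that the remainder of the flush
+ * skips file-backed pages; cleared at the start of hibernate_flush_memory()
+ */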
+boolean_t hibernate_skip_external = FALSE;
 
 static int
 hibernate_flush_queue(queue_head_t *q, int qcount)
@@ -4482,7 +5297,6 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
 					goto reenter_pg_on_q;
 				}
-				vm_pageout_scan_wants_object = m_object;
 
 				vm_page_unlock_queues();
 				mutex_pause(try_failed_count++);
@@ -4492,10 +5306,9 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 				continue;
 			} else {
 				l_object = m_object;
-				vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 			}
 		}
-		if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->busy || m->absent || m->error) {
+		if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
 			/*
 			 * page is not to be cleaned
 			 * put it back on the head of its queue
@@ -4507,9 +5320,6 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
 			goto reenter_pg_on_q;
 		}
-		if ( !m_object->pager_initialized && m_object->pager_created)
-			goto reenter_pg_on_q;
-			
 		if (m_object->copy == VM_OBJECT_NULL) {
 			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
 				/*
@@ -4522,8 +5332,9 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 		if ( !m->dirty && m->pmapped) {
 		        refmod_state = pmap_get_refmod(m->phys_page);
 
-			if ((refmod_state & VM_MEM_MODIFIED))
-				m->dirty = TRUE;
+			if ((refmod_state & VM_MEM_MODIFIED)) {
+				SET_PAGE_DIRTY(m, FALSE);
+			}
 		} else
 			refmod_state = 0;
 
@@ -4537,6 +5348,13 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
 			goto reenter_pg_on_q;
 		}
+
+		if (hibernate_skip_external == TRUE && !m_object->internal) {
+
+			hibernate_stats.hibernate_skipped_external++;
+			
+			goto reenter_pg_on_q;
+		}
 		tq = NULL;
 
 		if (m_object->internal) {
@@ -4553,30 +5371,37 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 			        vm_object_unlock(l_object);
 				l_object = NULL;
 			}
-			vm_pageout_scan_wants_object = VM_OBJECT_NULL;
-
-			tq->pgo_throttled = TRUE;
 
 			while (retval == 0) {
 
+				tq->pgo_throttled = TRUE;
+
 				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
 
-			vm_page_unlock_queues();
+				vm_page_unlock_queues();
 
-			wait_result = thread_block(THREAD_CONTINUE_NULL);
+				wait_result = thread_block(THREAD_CONTINUE_NULL);
 
 				vm_page_lock_queues();
 
+				if (wait_result != THREAD_TIMED_OUT)
+					break;
+				if (!VM_PAGE_Q_THROTTLED(tq))
+					break;
+
 				if (hibernate_should_abort())
 					retval = 1;
 
-				if (wait_result != THREAD_TIMED_OUT)
-					break;
-				
 				if (--wait_count == 0) {
-				hibernate_stats.hibernate_throttle_timeout++;
-				retval = 1;
-			}
+
+					hibernate_stats.hibernate_throttle_timeout++;
+
+					if (tq == eq) {
+						hibernate_skip_external = TRUE;
+						break;
+					}
+					retval = 1;
+				}
 			}
 			if (retval)
 				break;
@@ -4585,9 +5410,19 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
 			continue;
 		}
-		VM_PAGE_QUEUES_REMOVE(m);
+		/*
+		 * we've already factored out pages in the laundry which
+		 * means this page can't be on the pageout queue so it's
+		 * safe to do the vm_page_queues_remove
+		 */
+		assert(!m->pageout_queue);
+
+		vm_page_queues_remove(m);
 
-		hibernate_dirty_page(m);
+		if (COMPRESSED_PAGER_IS_ACTIVE && m_object->internal == TRUE)
+			pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
+
+		(void)vm_pageout_cluster(m, FALSE, FALSE, FALSE);
 
 		hibernate_stats.hibernate_found_dirty++;
 
@@ -4608,7 +5443,6 @@ next_pg:
 		vm_object_unlock(l_object);
 		l_object = NULL;
 	}
-    vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
 	vm_page_unlock_queues();
 
@@ -4619,13 +5453,11 @@ next_pg:
 
 
 static int
-hibernate_flush_dirty_pages()
+hibernate_flush_dirty_pages(int pass)
 {
 	struct vm_speculative_age_q	*aq;
 	uint32_t	i;
 
-	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
-
 	if (vm_page_local_q) {
 		for (i = 0; i < vm_page_local_q_count; i++)
 			vm_page_reactivate_local(i, TRUE, FALSE);
@@ -4657,21 +5489,44 @@ hibernate_flush_dirty_pages()
 				return (1);
 		}
 	}
-	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count))
+	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count))
 		return (1);
-	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_zf_queue_count))
+	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count))
 		return (1);
-	if (hibernate_flush_queue(&vm_page_queue_zf, vm_zf_queue_count))
+	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count))
 		return (1);
-
 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal))
 		return (1);
-	return (hibernate_drain_pageout_queue(&vm_pageout_queue_external));
+
+	if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+		vm_compressor_record_warmup_start();
+
+	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
+		if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+			vm_compressor_record_warmup_end();
+		return (1);
+	}
+	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
+		if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+			vm_compressor_record_warmup_end();
+		return (1);
+	}
+	if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+		vm_compressor_record_warmup_end();
+
+	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external))
+		return (1);
+
+	return (0);
 }
 
 
-extern void IOSleep(unsigned int);
-extern int sync_internal(void);
+void
+hibernate_reset_stats()
+{
+	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
 
 int
 hibernate_flush_memory()
@@ -4680,32 +5535,50 @@ hibernate_flush_memory()
 
 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
 
-	IOSleep(2 * 1000);
+	hibernate_cleaning_in_progress = TRUE;
+	hibernate_skip_external = FALSE;
+
+	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
+
+		if (COMPRESSED_PAGER_IS_ACTIVE) {
+
+				KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
 
-	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_NONE, vm_page_free_count, 0, 0, 0, 0);
+				vm_compressor_flush();
 
-	if ((retval = hibernate_flush_dirty_pages()) == 0) {
+				KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+		}
 		if (consider_buffer_cache_collect != NULL) {
+			unsigned int orig_wire_count;
 
-			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, vm_page_wire_count, 0, 0, 0, 0);
+			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+			orig_wire_count = vm_page_wire_count;
 			
-			sync_internal();
 			(void)(*consider_buffer_cache_collect)(1);
-			consider_zone_gc(1);
+			consider_zone_gc(TRUE);
+
+			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
 
-			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, vm_page_wire_count, 0, 0, 0, 0);
+			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
 		}
 	}
+	hibernate_cleaning_in_progress = FALSE;
+
 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
 
+	if (retval && COMPRESSED_PAGER_IS_ACTIVE)
+		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
+
+
     HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
                 hibernate_stats.hibernate_considered,
                 hibernate_stats.hibernate_reentered_on_q,
                 hibernate_stats.hibernate_found_dirty);
-    HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) queue_nolock(%d)\n",
+    HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
                 hibernate_stats.hibernate_skipped_cleaning,
                 hibernate_stats.hibernate_skipped_transient,
                 hibernate_stats.hibernate_skipped_precious,
+                hibernate_stats.hibernate_skipped_external,
                 hibernate_stats.hibernate_queue_nolock);
     HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
                 hibernate_stats.hibernate_queue_paused,
@@ -4717,6 +5590,7 @@ hibernate_flush_memory()
 	return (retval);
 }
 
+
 static void
 hibernate_page_list_zero(hibernate_page_list_t *list)
 {
@@ -4738,38 +5612,6 @@ hibernate_page_list_zero(hibernate_page_list_t *list)
     }
 }
 
-void
-hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
-{
-    uint32_t i;
-    vm_page_t m;
-    uint64_t start, end, timeout, nsec;
-    clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
-    clock_get_uptime(&start);
-
-    for (i = 0; i < gobble_count; i++)
-    {
-	while (VM_PAGE_NULL == (m = vm_page_grab()))
-	{
-	    clock_get_uptime(&end);
-	    if (end >= timeout)
-		break;
-	    VM_PAGE_WAIT();
-	}
-	if (!m)
-	    break;
-	m->busy = FALSE;
-	vm_page_gobble(m);
-
-	m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
-	hibernate_gobble_queue = m;
-    }
-
-    clock_get_uptime(&end);
-    absolutetime_to_nanoseconds(end - start, &nsec);
-    HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
-}
-
 void
 hibernate_free_gobble_pages(void)
 {
@@ -4791,7 +5633,7 @@ hibernate_free_gobble_pages(void)
 }
 
 static boolean_t 
-hibernate_consider_discard(vm_page_t m)
+hibernate_consider_discard(vm_page_t m, boolean_t preflight)
 {
     vm_object_t object = NULL;
     int                  refmod_state;
@@ -4803,39 +5645,39 @@ hibernate_consider_discard(vm_page_t m)
             panic("hibernate_consider_discard: private");
 
         if (!vm_object_lock_try(m->object)) {
-	    hibernate_stats.cd_lock_failed++;
+	    if (!preflight) hibernate_stats.cd_lock_failed++;
             break;
 	}
         object = m->object;
 
 	if (VM_PAGE_WIRED(m)) {
-	    hibernate_stats.cd_found_wired++;
+	    if (!preflight) hibernate_stats.cd_found_wired++;
             break;
 	}
         if (m->precious) {
-	    hibernate_stats.cd_found_precious++;
+	    if (!preflight) hibernate_stats.cd_found_precious++;
             break;
 	}
         if (m->busy || !object->alive) {
            /*
             *	Somebody is playing with this page.
             */
-   	    hibernate_stats.cd_found_busy++;
-	    break;
+	    if (!preflight) hibernate_stats.cd_found_busy++;
+            break;
 	}
         if (m->absent || m->unusual || m->error) {
            /*
             * If it's unusual in anyway, ignore it
             */
-	    hibernate_stats.cd_found_unusual++;
+	    if (!preflight) hibernate_stats.cd_found_unusual++;
             break;
 	}
         if (m->cleaning) {
-	    hibernate_stats.cd_found_cleaning++;
+	    if (!preflight) hibernate_stats.cd_found_cleaning++;
             break;
 	}
-	if (m->laundry || m->list_req_pending) {
-	    hibernate_stats.cd_found_laundry++;
+	if (m->laundry) {
+	    if (!preflight) hibernate_stats.cd_found_laundry++;
             break;
 	}
         if (!m->dirty)
@@ -4844,8 +5686,9 @@ hibernate_consider_discard(vm_page_t m)
         
             if (refmod_state & VM_MEM_REFERENCED)
                 m->reference = TRUE;
-            if (refmod_state & VM_MEM_MODIFIED)
-                m->dirty = TRUE;
+            if (refmod_state & VM_MEM_MODIFIED) {
+              	SET_PAGE_DIRTY(m, FALSE);
+	    }
         }
    
         /*
@@ -4855,8 +5698,20 @@ hibernate_consider_discard(vm_page_t m)
 		    || (VM_PURGABLE_VOLATILE == object->purgable)
 		    || (VM_PURGABLE_EMPTY    == object->purgable);
 
-	if (discard == FALSE)
-	    hibernate_stats.cd_found_dirty++;
+
+        if (discard == FALSE) {
+		if (!preflight)
+			hibernate_stats.cd_found_dirty++;
+        } else if (m->xpmapped && m->reference && !object->internal) {
+		if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+			if (!preflight)
+				hibernate_stats.cd_found_xpmapped++;
+			discard = FALSE;
+		} else {
+			if (!preflight)
+				hibernate_stats.cd_skipped_xpmapped++;
+		}
+        }
     }
     while (FALSE);
 
@@ -4876,6 +5731,15 @@ hibernate_discard_page(vm_page_t m)
         */
         return;
 
+#if MACH_ASSERT || DEBUG
+    vm_object_t object = m->object;
+    if (!vm_object_lock_try(m->object))
+	panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
+#else
+    /* No need to lock page queue for token delete, hibernate_vm_unlock() 
+       makes sure these locks are uncontended before sleep */
+#endif /* MACH_ASSERT || DEBUG */
+
     if (m->pmapped == TRUE) 
     {
         __unused int refmod_state = pmap_disconnect(m->phys_page);
@@ -4894,13 +5758,66 @@ hibernate_discard_page(vm_page_t m)
         assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL));
         purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
         assert(old_queue);
-        /* No need to lock page queue for token delete, hibernate_vm_unlock() 
-           makes sure these locks are uncontended before sleep */
-        vm_purgeable_token_delete_first(old_queue);
+	if (m->object->purgeable_when_ripe) {
+		vm_purgeable_token_delete_first(old_queue);
+	}
         m->object->purgable = VM_PURGABLE_EMPTY;
+
+	/*
+	 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
+	 * accounted in the "volatile" ledger, so no change here.
+	 * We have to update vm_page_purgeable_count, though, since we're
+	 * effectively purging this object.
+	 */
+	unsigned int delta;
+	assert(m->object->resident_page_count >= m->object->wired_page_count);
+	delta = (m->object->resident_page_count - m->object->wired_page_count);
+	assert(vm_page_purgeable_count >= delta);
+	assert(delta > 0);
+	OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
     }
 	
     vm_page_free(m);
+
+#if MACH_ASSERT || DEBUG
+    vm_object_unlock(object);
+#endif	/* MACH_ASSERT || DEBUG */
+}
+
+/*
+ Grab locks for hibernate_page_list_setall()
+*/
+void
+hibernate_vm_lock_queues(void)
+{
+    vm_object_lock(compressor_object);
+    vm_page_lock_queues();
+    lck_mtx_lock(&vm_page_queue_free_lock);
+
+    if (vm_page_local_q) {
+	uint32_t  i;
+	for (i = 0; i < vm_page_local_q_count; i++) {
+	    struct vpl	*lq;
+	    lq = &vm_page_local_q[i].vpl_un.vpl;
+	    VPL_LOCK(&lq->vpl_lock);
+	}
+    }
+}
+
+void
+hibernate_vm_unlock_queues(void)
+{
+    if (vm_page_local_q) {
+	uint32_t  i;
+	for (i = 0; i < vm_page_local_q_count; i++) {
+	    struct vpl	*lq;
+	    lq = &vm_page_local_q[i].vpl_un.vpl;
+	    VPL_UNLOCK(&lq->vpl_lock);
+	}
+    }
+    lck_mtx_unlock(&vm_page_queue_free_lock);
+    vm_page_unlock_queues();
+    vm_object_unlock(compressor_object);
 }
 
 /*
@@ -4912,52 +5829,94 @@ hibernate_discard_page(vm_page_t m)
 void
 hibernate_page_list_setall(hibernate_page_list_t * page_list,
 			   hibernate_page_list_t * page_list_wired,
+			   hibernate_page_list_t * page_list_pal,
+			   boolean_t preflight, 
+			   boolean_t will_discard,
 			   uint32_t * pagesOut)
 {
     uint64_t start, end, nsec;
     vm_page_t m;
+    vm_page_t next;
     uint32_t pages = page_list->page_count;
-    uint32_t count_zf = 0, count_throttled = 0;
-    uint32_t count_inactive = 0, count_active = 0, count_speculative = 0;
+    uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
+    uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
     uint32_t count_wire = pages;
     uint32_t count_discard_active    = 0;
     uint32_t count_discard_inactive  = 0;
+    uint32_t count_discard_cleaned   = 0;
     uint32_t count_discard_purgeable = 0;
     uint32_t count_discard_speculative = 0;
+    uint32_t count_discard_vm_struct_pages = 0;
     uint32_t i;
     uint32_t             bank;
     hibernate_bitmap_t * bitmap;
     hibernate_bitmap_t * bitmap_wired;
+    boolean_t			 discard_all;
+    boolean_t            discard;
+
+    HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
+
+    if (preflight) {
+        page_list       = NULL;
+        page_list_wired = NULL;
+        page_list_pal   = NULL;
+		discard_all     = FALSE;
+    } else {
+		discard_all     = will_discard;
+    }
 
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+        vm_page_lock_queues();
+	if (vm_page_local_q) {
+	    for (i = 0; i < vm_page_local_q_count; i++) {
+		struct vpl	*lq;
+		lq = &vm_page_local_q[i].vpl_un.vpl;
+		VPL_LOCK(&lq->vpl_lock);
+	    }
+	}
+    }
+#endif  /* MACH_ASSERT || DEBUG */
 
-    HIBLOG("hibernate_page_list_setall start %p, %p\n", page_list, page_list_wired);
 
     KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
 
     clock_get_uptime(&start);
 
-    hibernate_page_list_zero(page_list);
-    hibernate_page_list_zero(page_list_wired);
-
-    hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
-    hibernate_stats.cd_pages = pages;
+    if (!preflight) {
+	hibernate_page_list_zero(page_list);
+	hibernate_page_list_zero(page_list_wired);
+	hibernate_page_list_zero(page_list_pal);
+    
+	hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
+	hibernate_stats.cd_pages = pages;
+    }
 
     if (vm_page_local_q) {
 	    for (i = 0; i < vm_page_local_q_count; i++)
-		    vm_page_reactivate_local(i, TRUE, TRUE);
+		    vm_page_reactivate_local(i, TRUE, !preflight);
+    }
+
+    if (preflight) {
+	vm_object_lock(compressor_object);
+	vm_page_lock_queues();
+	lck_mtx_lock(&vm_page_queue_free_lock);
     }
 
     m = (vm_page_t) hibernate_gobble_queue;
-    while(m)
+    while (m)
     {
 	pages--;
 	count_wire--;
-	hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+	if (!preflight) {
+	    hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+	    hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+	}
 	m = (vm_page_t) m->pageq.next;
     }
-#ifndef PPC
-    for( i = 0; i < real_ncpus; i++ )
+
+    if (!preflight) for( i = 0; i < real_ncpus; i++ )
     {
 	if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
 	{
@@ -4973,7 +5932,7 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
 	    }
 	}
     }
-#endif
+
     for( i = 0; i < vm_colors; i++ )
     {
 	queue_iterate(&vm_page_queue_free[i],
@@ -4983,10 +5942,12 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
 	{
 	    pages--;
 	    count_wire--;
-	    hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-	    hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-
-	    hibernate_stats.cd_total_free++;
+	    if (!preflight) {
+		hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+		hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    
+		hibernate_stats.cd_total_free++;
+	    }
 	}
     }
 
@@ -4997,136 +5958,229 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
     {
 	pages--;
 	count_wire--;
-	hibernate_page_bitset(page_list,       TRUE, m->phys_page);
-	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-
-	hibernate_stats.cd_total_free++;
+	if (!preflight) {
+	    hibernate_page_bitset(page_list,       TRUE, m->phys_page);
+	    hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    
+	    hibernate_stats.cd_total_free++;
+	}
     }
 
-    queue_iterate( &vm_page_queue_throttled,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+    while (m && !queue_end(&vm_page_queue_throttled, (queue_entry_t)m))
     {
+        next = (vm_page_t) m->pageq.next;
+	discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
             count_discard_inactive++;
+            discard = discard_all;
         }
         else
             count_throttled++;
 	count_wire--;
-	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+	if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+        if (discard) hibernate_discard_page(m);
+	m = next;
     }
 
-    queue_iterate( &vm_page_queue_zf,
-                    m,
-                    vm_page_t,
-                   pageq )
+    m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+    while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
     {
+        next = (vm_page_t) m->pageq.next;
+	discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
 	    if (m->dirty)
 		count_discard_purgeable++;
 	    else
 		count_discard_inactive++;
+            discard = discard_all;
         }
         else
-            count_zf++;
+            count_anonymous++;
 	count_wire--;
-	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+	if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+	m = next;
     }
 
-    queue_iterate( &vm_page_queue_inactive,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+    while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
     {
+        next = (vm_page_t) m->pageq.next;
+	discard = FALSE;
         if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
 	    if (m->dirty)
 		count_discard_purgeable++;
 	    else
-		count_discard_inactive++;
+		count_discard_cleaned++;
+            discard = discard_all;
         }
         else
-            count_inactive++;
+            count_cleaned++;
 	count_wire--;
-	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-    }
-
-    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
-    {
-       queue_iterate(&vm_page_queue_speculative[i].age_q,
-                     m,
-                     vm_page_t,
-                     pageq)
-       {
-           if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
-            && hibernate_consider_discard(m))
-           {
-               hibernate_page_bitset(page_list, TRUE, m->phys_page);
-               count_discard_speculative++;
-           }
-           else
-               count_speculative++;
-           count_wire--;
-           hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-       }
+	if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+	m = next;
     }
 
-    queue_iterate( &vm_page_queue_active,
-                    m,
-                    vm_page_t,
-                    pageq )
+    m = (vm_page_t) queue_first(&vm_page_queue_active);
+    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
     {
+        next = (vm_page_t) m->pageq.next;
+	discard = FALSE;
         if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) 
-         && hibernate_consider_discard(m))
+         && hibernate_consider_discard(m, preflight))
         {
-            hibernate_page_bitset(page_list, TRUE, m->phys_page);
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
 	    if (m->dirty)
 		count_discard_purgeable++;
 	    else
 		count_discard_active++;
+            discard = discard_all;
         }
         else
             count_active++;
 	count_wire--;
-	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+	if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+	m = next;
+    }
+
+    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+	discard = FALSE;
+        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+         && hibernate_consider_discard(m, preflight))
+        {
+            if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+	    if (m->dirty)
+		count_discard_purgeable++;
+	    else
+		count_discard_inactive++;
+            discard = discard_all;
+        }
+        else
+            count_inactive++;
+	count_wire--;
+	if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+        if (discard)    hibernate_discard_page(m);
+	m = next;
     }
 
-    // pull wired from hibernate_bitmap
+    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+    {
+	m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
+	while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+	{
+	    next = (vm_page_t) m->pageq.next;
+	    discard = FALSE;
+	    if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) 
+	     && hibernate_consider_discard(m, preflight))
+	    {
+		if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+		count_discard_speculative++;
+		discard = discard_all;
+	    }
+	    else
+		count_speculative++;
+	    count_wire--;
+	    if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+	    if (discard)    hibernate_discard_page(m);
+	    m = next;
+	}
+    }
 
-    bitmap = &page_list->bank_bitmap[0];
-    bitmap_wired = &page_list_wired->bank_bitmap[0];
-    for (bank = 0; bank < page_list->bank_count; bank++)
+    queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
     {
-	for (i = 0; i < bitmap->bitmapwords; i++)
-	    bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
-	bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
-	bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+        count_compressor++;
+	count_wire--;
+	if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+    }
+
+    if (preflight == FALSE && discard_all == TRUE) {
+	    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+	    HIBLOG("hibernate_teardown started\n");
+	    count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
+	    HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
+
+	    pages -= count_discard_vm_struct_pages;
+	    count_wire -= count_discard_vm_struct_pages;
+
+	    hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
+
+	    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+    }
+
+    if (!preflight) {
+	// pull wired from hibernate_bitmap
+	bitmap = &page_list->bank_bitmap[0];
+	bitmap_wired = &page_list_wired->bank_bitmap[0];
+	for (bank = 0; bank < page_list->bank_count; bank++)
+	{
+	    for (i = 0; i < bitmap->bitmapwords; i++)
+		bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
+	    bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
+	    bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+	}
     }
 
     // machine dependent adjustments
-    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);
+    hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
 
-    hibernate_stats.cd_count_wire = count_wire;
-    hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable + count_discard_speculative;
+    if (!preflight) {
+	hibernate_stats.cd_count_wire = count_wire;
+	hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
+		count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
+    }
 
     clock_get_uptime(&end);
     absolutetime_to_nanoseconds(end - start, &nsec);
     HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
 
-    HIBLOG("pages %d, wire %d, act %d, inact %d, spec %d, zf %d, throt %d, could discard act %d inact %d purgeable %d spec %d\n", 
-                pages, count_wire, count_active, count_inactive, count_speculative, count_zf, count_throttled,
-                count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
+    HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d\n", 
+	   pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
+	        discard_all ? "did" : "could",
+	        count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+
+    if (hibernate_stats.cd_skipped_xpmapped)
+	    HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
 
-    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative;
+    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+    if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+
+#if MACH_ASSERT || DEBUG
+    if (!preflight)
+    {
+	if (vm_page_local_q) {
+	    for (i = 0; i < vm_page_local_q_count; i++) {
+		struct vpl	*lq;
+		lq = &vm_page_local_q[i].vpl_un.vpl;
+		VPL_UNLOCK(&lq->vpl_lock);
+	    }
+	}
+        vm_page_unlock_queues();
+    }
+#endif  /* MACH_ASSERT || DEBUG */
+
+    if (preflight) {
+	lck_mtx_unlock(&vm_page_queue_free_lock);
+	vm_page_unlock_queues();
+	vm_object_unlock(compressor_object);
+    }
 
     KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
 }
@@ -5141,12 +6195,25 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
     uint32_t  count_discard_active    = 0;
     uint32_t  count_discard_inactive  = 0;
     uint32_t  count_discard_purgeable = 0;
+    uint32_t  count_discard_cleaned   = 0;
     uint32_t  count_discard_speculative = 0;
 
+
+#if MACH_ASSERT || DEBUG
+        vm_page_lock_queues();
+	if (vm_page_local_q) {
+	    for (i = 0; i < vm_page_local_q_count; i++) {
+		struct vpl	*lq;
+		lq = &vm_page_local_q[i].vpl_un.vpl;
+		VPL_LOCK(&lq->vpl_lock);
+	    }
+	}
+#endif  /* MACH_ASSERT || DEBUG */
+
     clock_get_uptime(&start);
 
-    m = (vm_page_t) queue_first(&vm_page_queue_zf);
-    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
+    m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+    while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
     {
         next = (vm_page_t) m->pageq.next;
         if (hibernate_page_bittst(page_list, m->phys_page))
@@ -5205,21 +6272,396 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list)
         m = next;
     }
 
-    clock_get_uptime(&end);
-    absolutetime_to_nanoseconds(end - start, &nsec);
-    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d\n",
-                nsec / 1000000ULL,
-                count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
-}
-
-#endif /* HIBERNATION */
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#include <mach_vm_debug.h>
-#if	MACH_VM_DEBUG
-
-#include <mach_debug/hash_info.h>
+    m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+    while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+    {
+        next = (vm_page_t) m->pageq.next;
+        if (hibernate_page_bittst(page_list, m->phys_page))
+        {
+	    if (m->dirty)
+		count_discard_purgeable++;
+	    else
+		count_discard_cleaned++;
+            hibernate_discard_page(m);
+        }
+        m = next;
+    }
+
+#if MACH_ASSERT || DEBUG
+	if (vm_page_local_q) {
+	    for (i = 0; i < vm_page_local_q_count; i++) {
+		struct vpl	*lq;
+		lq = &vm_page_local_q[i].vpl_un.vpl;
+		VPL_UNLOCK(&lq->vpl_lock);
+	    }
+	}
+        vm_page_unlock_queues();
+#endif  /* MACH_ASSERT || DEBUG */
+
+    clock_get_uptime(&end);
+    absolutetime_to_nanoseconds(end - start, &nsec);
+    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
+                nsec / 1000000ULL,
+	        count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+}
+
+boolean_t       hibernate_paddr_map_inited = FALSE;
+boolean_t       hibernate_rebuild_needed = FALSE;
+unsigned int	hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t	hibernate_rebuild_hash_list = NULL;
+
+unsigned int	hibernate_teardown_found_tabled_pages = 0;
+unsigned int	hibernate_teardown_found_created_pages = 0;
+unsigned int	hibernate_teardown_found_free_pages = 0;
+unsigned int	hibernate_teardown_vm_page_free_count;
+
+
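+/*
+ * each ppnum_mapping describes a run of vm_pages[] entries whose physical
+ * page numbers are contiguous: the starting physical page plus the range
+ * of vm_pages indices it covers.  hibernate_lookup_paddr() uses this list
+ * to recover the physical page number for a vm_pages index whose vm_page_t
+ * contents may have been overwritten by the teardown-time compaction
+ */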
+struct ppnum_mapping {
+	struct ppnum_mapping	*ppnm_next;
+	ppnum_t			ppnm_base_paddr;
+	unsigned int		ppnm_sindx;
+	unsigned int		ppnm_eindx;
+};
+
+struct ppnum_mapping	*ppnm_head;
+struct ppnum_mapping	*ppnm_last_found = NULL;
+
+
+void
+hibernate_create_paddr_map() 
+{
+	unsigned int	i;
+	ppnum_t		next_ppnum_in_run = 0;
+	struct ppnum_mapping *ppnm = NULL;
+
+	if (hibernate_paddr_map_inited == FALSE) {
+
+		for (i = 0; i < vm_pages_count; i++) {
+
+			if (ppnm)
+				ppnm->ppnm_eindx = i;
+
+			if (ppnm == NULL || vm_pages[i].phys_page != next_ppnum_in_run) {
+
+				ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+				ppnm->ppnm_next = ppnm_head;
+				ppnm_head = ppnm;
+
+				ppnm->ppnm_sindx = i;
+				ppnm->ppnm_base_paddr = vm_pages[i].phys_page;
+			}
+			next_ppnum_in_run = vm_pages[i].phys_page + 1;
+		}
+		ppnm->ppnm_eindx++;
+
+		hibernate_paddr_map_inited = TRUE;
+	}
+}
+
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+	struct ppnum_mapping *ppnm = NULL;
+	
+	ppnm = ppnm_last_found;
+
+	if (ppnm) {
+		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+			goto done;
+	}
+	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+			ppnm_last_found = ppnm;
+			break;
+		}
+	}
+	if (ppnm == NULL)
+		panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+	return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
+
+
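+/*
+ * mark the physical pages backing the kernel virtual range [saddr, eaddr)
+ * in both the regular and wired bitmaps (the same marking applied to free
+ * pages in hibernate_page_list_setall) so the hibernation image doesn't
+ * need to preserve them; returns the number of pages marked
+ */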
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+	addr64_t	saddr_aligned;
+	addr64_t	eaddr_aligned;
+	addr64_t	addr;
+	ppnum_t		paddr;
+	unsigned int	mark_as_unneeded_pages = 0;
+
+	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+	eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+		paddr = pmap_find_phys(kernel_pmap, addr);
+
+		assert(paddr);
+
+		hibernate_page_bitset(page_list,       TRUE, paddr);
+		hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+		mark_as_unneeded_pages++;
+	}
+	return (mark_as_unneeded_pages);
+}
+
+
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+	vm_page_bucket_t *bucket;
+	int		hash_id;
+
+	assert(mem->hashed);
+	assert(mem->object);
+	assert(mem->offset != (vm_object_offset_t) -1);
+
+	/*
+	 *	Insert it into the object/offset hash table
+	 */
+	hash_id = vm_page_hash(mem->object, mem->offset);
+	bucket = &vm_page_buckets[hash_id];
+
+	mem->next_m = bucket->page_list;
+	bucket->page_list = VM_PAGE_PACK_PTR(mem);
+}
+
+
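+/*
+ * re-initialize the vm_pages[] entries in [sindx, eindx) as free pages and
+ * put them back on the appropriate color free queues; used by
+ * hibernate_rebuild_vm_structs() to repopulate the holes left behind by
+ * the teardown-time compaction
+ */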
+void
+hibernate_free_range(int sindx, int eindx)
+{
+	vm_page_t	mem;
+	unsigned int	color;
+
+	while (sindx < eindx) {
+		mem = &vm_pages[sindx];
+
+		vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+		mem->lopage = FALSE;
+		mem->free = TRUE;
+
+	        color = mem->phys_page & vm_color_mask;
+		queue_enter_first(&vm_page_queue_free[color],
+				  mem,
+				  vm_page_t,
+				  pageq);
+		vm_page_free_count++;
+
+		sindx++;
+	}
+}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
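+/*
+ * undo hibernate_teardown_vm_structs(): walk the compacted vm_pages[]
+ * entries from the last valid index back to 0, copy each one back to its
+ * original slot (recorded in next_m), re-insert it in the hash if it was
+ * hashed, rebuild the free lists for the gaps in between, and finally
+ * re-hash the fictitious pages saved on hibernate_rebuild_hash_list
+ */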
+void
+hibernate_rebuild_vm_structs(void)
+{
+	int		cindx, sindx, eindx;
+	vm_page_t	mem, tmem, mem_next;
+	AbsoluteTime	startTime, endTime;
+	uint64_t	nsec;
+
+	if (hibernate_rebuild_needed == FALSE)
+		return;
+
+	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+	HIBLOG("hibernate_rebuild started\n");
+
+	clock_get_uptime(&startTime);
+
+	hibernate_rebuild_pmap_structs();
+
+	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
+	eindx = vm_pages_count;
+
+	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+		
+		mem = &vm_pages[cindx];
+		/*
+		 * hibernate_teardown_vm_structs leaves the location where
+		 * this vm_page_t must be located in "next_m".
+		 */
+		tmem = VM_PAGE_UNPACK_PTR(mem->next_m);
+		mem->next_m = VM_PAGE_PACK_PTR(NULL);
+
+		sindx = (int)(tmem - &vm_pages[0]);
+
+		if (mem != tmem) {
+			/*
+			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
+			 * so move it back to its real location
+			 */
+			*tmem = *mem;
+			mem = tmem;
+		}
+		if (mem->hashed)
+			hibernate_hash_insert_page(mem);
+		/*
+		 * the 'hole' between this vm_page_t and the previous
+		 * vm_page_t we moved needs to be initialized as 
+		 * a range of free vm_page_t's
+		 */
+		hibernate_free_range(sindx + 1, eindx);
+
+		eindx = sindx;
+	}
+	if (sindx)
+		hibernate_free_range(0, sindx);
+
+	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
+
+	/*
+	 * process the list of vm_page_t's that were entered in the hash,
+	 * but were not located in the vm_pages array... these are
+	 * vm_page_t's that were created on the fly (i.e. fictitious)
+	 */
+	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
+		mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
+
+		mem->next_m = VM_PAGE_PACK_PTR(NULL);
+		hibernate_hash_insert_page(mem);
+	}
+	hibernate_rebuild_hash_list = NULL;
+
+        clock_get_uptime(&endTime);
+        SUB_ABSOLUTETIME(&endTime, &startTime);
+        absolutetime_to_nanoseconds(endTime, &nsec);
+
+	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
+
+	hibernate_rebuild_needed = FALSE;
+
+	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+}
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
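+/*
+ * shrink the VM bookkeeping that has to be preserved in the hibernation
+ * image: move any hashed pages that live outside vm_pages[] (fictitious
+ * pages) onto hibernate_rebuild_hash_list, pull free pages off the free
+ * queues, compact the remaining vm_page_t entries to the front of
+ * vm_pages[], and mark the hash buckets, the tail of vm_pages[] and any
+ * unneeded pmap structures as pages the image can skip
+ */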
+uint32_t
+hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+	unsigned int	i;
+	unsigned int	compact_target_indx;
+	vm_page_t	mem, mem_next;
+	vm_page_bucket_t *bucket;
+	unsigned int	mark_as_unneeded_pages = 0;
+	unsigned int	unneeded_vm_page_bucket_pages = 0;
+	unsigned int	unneeded_vm_pages_pages = 0;
+	unsigned int	unneeded_pmap_pages = 0;
+	addr64_t	start_of_unneeded = 0;
+	addr64_t	end_of_unneeded = 0;
+
+	
+	if (hibernate_should_abort())
+		return (0);
+
+	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
+	       vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
+	       vm_page_cleaned_count, compressor_object->resident_page_count);
+
+	for (i = 0; i < vm_page_bucket_count; i++) {
+
+		bucket = &vm_page_buckets[i];
+
+		for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = mem_next) {
+			assert(mem->hashed);
+
+			mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
+
+			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
+				mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
+				hibernate_rebuild_hash_list = mem;
+			}
+		}
+	}
+	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
+	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
+
+	hibernate_teardown_vm_page_free_count = vm_page_free_count;
+
+	compact_target_indx = 0;
+
+	for (i = 0; i < vm_pages_count; i++) {
+
+		mem = &vm_pages[i];
+
+		if (mem->free) {
+			unsigned int color;
+
+			assert(mem->busy);
+			assert(!mem->lopage);
+
+			color = mem->phys_page & vm_color_mask;
+
+			queue_remove(&vm_page_queue_free[color],
+				     mem,
+				     vm_page_t,
+				     pageq);
+			mem->pageq.next = NULL;
+			mem->pageq.prev = NULL;
+
+			vm_page_free_count--;
+
+			hibernate_teardown_found_free_pages++;
+
+			if ( !vm_pages[compact_target_indx].free)
+				compact_target_indx = i;
+		} else {
+			/*
+			 * record this vm_page_t's original location
+			 * we need this even if it doesn't get moved
+			 * as an indicator to the rebuild function that
+			 * we don't have to move it
+			 */
+			mem->next_m = VM_PAGE_PACK_PTR(mem);
+
+			if (vm_pages[compact_target_indx].free) {
+				/*
+				 * we've got a hole to fill, so
+				 * move this vm_page_t to its new home
+				 */
+				vm_pages[compact_target_indx] = *mem;
+				mem->free = TRUE;
+
+				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
+				compact_target_indx++;
+			} else
+				hibernate_teardown_last_valid_compact_indx = i;
+		}
+	}
+	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
+							     (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
+	mark_as_unneeded_pages += unneeded_vm_pages_pages;
+
+	hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
+
+	if (start_of_unneeded) {
+		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
+		mark_as_unneeded_pages += unneeded_pmap_pages;
+	}
+	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
+
+	hibernate_rebuild_needed = TRUE;
+
+	return (mark_as_unneeded_pages);
+}
+
+
+#endif /* HIBERNATION */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <mach_vm_debug.h>
+#if	MACH_VM_DEBUG
+
+#include <mach_debug/hash_info.h>
 #include <vm/vm_debug.h>
 
 /*
@@ -5252,7 +6694,7 @@ vm_page_info(
 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
 		lck_spin_lock(bucket_lock);
 
-		for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+		for (m = VM_PAGE_UNPACK_PTR(bucket->page_list); m != VM_PAGE_NULL; m = VM_PAGE_UNPACK_PTR(m->next_m))
 			bucket_count++;
 
 		lck_spin_unlock(bucket_lock);
@@ -5265,63 +6707,653 @@ vm_page_info(
 }
 #endif	/* MACH_VM_DEBUG */
 
-#include <mach_kdb.h>
-#if	MACH_KDB
+#if VM_PAGE_BUCKETS_CHECK
+void
+vm_page_buckets_check(void)
+{
+	unsigned int i;
+	vm_page_t p;
+	unsigned int p_hash;
+	vm_page_bucket_t *bucket;
+	lck_spin_t	*bucket_lock;
+
+	if (!vm_page_buckets_check_ready) {
+		return;
+	}
 
-#include <ddb/db_output.h>
-#include <vm/vm_print.h>
-#define	printf	kdbprintf
+#if HIBERNATION
+	if (hibernate_rebuild_needed ||
+	    hibernate_rebuild_hash_list) {
+		panic("BUCKET_CHECK: hibernation in progress: "
+		      "rebuild_needed=%d rebuild_hash_list=%p\n",
+		      hibernate_rebuild_needed,
+		      hibernate_rebuild_hash_list);
+	}
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+	char *cp;
+	for (cp = (char *) vm_page_fake_buckets_start;
+	     cp < (char *) vm_page_fake_buckets_end;
+	     cp++) {
+		if (*cp != 0x5a) {
+			panic("BUCKET_CHECK: corruption at %p in fake buckets "
+			      "[0x%llx:0x%llx]\n",
+			      cp,
+			      (uint64_t) vm_page_fake_buckets_start,
+			      (uint64_t) vm_page_fake_buckets_end);
+		}
+	}
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+	for (i = 0; i < vm_page_bucket_count; i++) {
+		bucket = &vm_page_buckets[i];
+		if (!bucket->page_list) {
+			continue;
+		}
+
+		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+		lck_spin_lock(bucket_lock);
+		p = VM_PAGE_UNPACK_PTR(bucket->page_list);
+		while (p != VM_PAGE_NULL) {
+			p_hash = vm_page_hash(p->object, p->offset);
+			if (!p->hashed) {
+				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+				      "hash %d in bucket %d at %p "
+				      "is not hashed\n",
+				      p, p->object, p->offset,
+				      p_hash, i, bucket);
+			}
+			if (p_hash != i) {
+				panic("BUCKET_CHECK: corruption in bucket %d "
+				      "at %p: page %p object %p offset 0x%llx "
+				      "hash %d\n",
+				      i, bucket, p, p->object, p->offset,
+				      p_hash);
+			}
+			p = VM_PAGE_UNPACK_PTR(p->next_m);
+		}
+		lck_spin_unlock(bucket_lock);
+	}
+
+//	printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
 /*
- *	Routine:	vm_page_print [exported]
+ * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
+ * local queues if they exist... it's the only spot in the system where we add pages
+ * to those queues...  once on those queues, those pages can only move to one of the
+ * global page queues or the free queues... they NEVER move from local q to local q.
+ * the 'local' state is stable when vm_page_queues_remove is called since we're behind
+ * the global vm_page_queue_lock at this point...  we still need to take the local lock
+ * in case this operation is being run on a different CPU than the local queue's identity,
+ * but we don't have to worry about the page moving to a global queue or becoming wired
+ * while we're grabbing the local lock since those operations would require the global
+ * vm_page_queue_lock to be held, and we already own it.
+ *
+ * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
+ * 'wired' and local are ALWAYS mutually exclusive conditions.
  */
 void
-vm_page_print(
-	db_addr_t	db_addr)
+vm_page_queues_remove(vm_page_t mem)
 {
-	vm_page_t	p;
+	boolean_t	was_pageable;
 
-	p = (vm_page_t) (long) db_addr;
+	VM_PAGE_QUEUES_ASSERT(mem, 1);
+	assert(!mem->pageout_queue);
+	/*
+	 *	if (mem->pageout_queue)
+	 * 		NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
+	 * 		the caller is responsible for determining if the page is on that queue, and if so, must
+	 * 		either first remove it (it needs both the page queues lock and the object lock to do
+	 * 		this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
+	 */
+	if (mem->local) {
+		struct vpl	*lq;
+		assert(mem->object != kernel_object);
+		assert(mem->object != compressor_object);
+		assert(!mem->inactive && !mem->speculative);
+		assert(!mem->active && !mem->throttled);
+		assert(!mem->clean_queue);
+		assert(!mem->fictitious);
+		lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;
+		VPL_LOCK(&lq->vpl_lock);
+		queue_remove(&lq->vpl_queue,
+			     mem, vm_page_t, pageq);
+		mem->local = FALSE;
+		mem->local_id = 0;
+		lq->vpl_count--;
+		if (mem->object->internal) {
+			lq->vpl_internal_count--;
+		} else {
+			lq->vpl_external_count--;
+		}
+		VPL_UNLOCK(&lq->vpl_lock);
+		was_pageable = FALSE;
+	}
 
-	iprintf("page 0x%x\n", p);
+	else if (mem->active) {
+		assert(mem->object != kernel_object);
+		assert(mem->object != compressor_object);
+		assert(!mem->inactive && !mem->speculative);
+		assert(!mem->clean_queue);
+		assert(!mem->throttled);
+		assert(!mem->fictitious);
+		queue_remove(&vm_page_queue_active,
+			mem, vm_page_t, pageq);
+		mem->active = FALSE;
+		vm_page_active_count--;
+		was_pageable = TRUE;
+	}
 
-	db_indent += 2;
+	else if (mem->inactive) {
+		assert(mem->object != kernel_object);
+		assert(mem->object != compressor_object);
+		assert(!mem->active && !mem->speculative);
+		assert(!mem->throttled);
+		assert(!mem->fictitious);
+		vm_page_inactive_count--;
+		if (mem->clean_queue) {
+			queue_remove(&vm_page_queue_cleaned,
+                        mem, vm_page_t, pageq);
+			mem->clean_queue = FALSE;
+			vm_page_cleaned_count--;
+		} else {
+			if (mem->object->internal) {
+				queue_remove(&vm_page_queue_anonymous,
+				mem, vm_page_t, pageq);
+				vm_page_anonymous_count--;
+			} else {
+				queue_remove(&vm_page_queue_inactive,
+				mem, vm_page_t, pageq);
+			}
+			vm_purgeable_q_advance_all();
+		}
+		mem->inactive = FALSE;
+		was_pageable = TRUE;
+	}
 
-	iprintf("object=0x%x", p->object);
-	printf(", offset=0x%x", p->offset);
-	printf(", wire_count=%d", p->wire_count);
+	else if (mem->throttled) {
+		assert(mem->object != compressor_object);
+		assert(!mem->active && !mem->inactive);
+		assert(!mem->speculative);
+		assert(!mem->fictitious);
+		queue_remove(&vm_page_queue_throttled,
+			     mem, vm_page_t, pageq);
+		mem->throttled = FALSE;
+		vm_page_throttled_count--;
+		was_pageable = FALSE;
+	}
 
-	iprintf("%slocal, %sinactive, %sactive, %sthrottled, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
-		(p->local ? "" : "!"),
-		(p->inactive ? "" : "!"),
-		(p->active ? "" : "!"),
-		(p->throttled ? "" : "!"),
-		(p->gobbled ? "" : "!"),
-		(p->laundry ? "" : "!"),
-		(p->free ? "" : "!"),
-		(p->reference ? "" : "!"),
-		(p->encrypted ? "" : "!"));
-	iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
-		(p->busy ? "" : "!"),
-		(p->wanted ? "" : "!"),
-		(p->tabled ? "" : "!"),
-		(p->fictitious ? "" : "!"),
-		(p->private ? "" : "!"),
-		(p->precious ? "" : "!"));
-	iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
-		(p->absent ? "" : "!"),
-		(p->error ? "" : "!"),
-		(p->dirty ? "" : "!"),
-		(p->cleaning ? "" : "!"),
-		(p->pageout ? "" : "!"),
-		(p->clustered ? "" : "!"));
-	iprintf("%soverwriting, %srestart, %sunusual\n",
-		(p->overwriting ? "" : "!"),
-		(p->restart ? "" : "!"),
-		(p->unusual ? "" : "!"));
-
-	iprintf("phys_page=0x%x", p->phys_page);
-
-	db_indent -= 2;
+	else if (mem->speculative) {
+		assert(mem->object != compressor_object);
+		assert(!mem->active && !mem->inactive);
+		assert(!mem->throttled);
+		assert(!mem->fictitious);
+                remque(&mem->pageq);
+		mem->speculative = FALSE;
+		vm_page_speculative_count--;
+		was_pageable = TRUE;
+	}
+
+	else if (mem->pageq.next || mem->pageq.prev) {
+		was_pageable = FALSE;
+		panic("vm_page_queues_remove: unmarked page on Q");
+	} else {
+		was_pageable = FALSE;
+	}
+
+	mem->pageq.next = NULL;
+	mem->pageq.prev = NULL;
+	VM_PAGE_QUEUES_ASSERT(mem, 0);
+	if (was_pageable) {
+		if (mem->object->internal) {
+			vm_page_pageable_internal_count--;
+		} else {
+			vm_page_pageable_external_count--;
+		}
+	}
+}
+
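+/*
+ * remove 'page' from its object's resident page list, first moving the
+ * object's memq_hint to a neighboring page so the hint never points at a
+ * page that is no longer on the list
+ */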
+void
+vm_page_remove_internal(vm_page_t page)
+{
+	vm_object_t __object = page->object;
+	if (page == __object->memq_hint) {
+		vm_page_t	__new_hint;
+		queue_entry_t	__qe;
+		__qe = queue_next(&page->listq);
+		if (queue_end(&__object->memq, __qe)) {
+			__qe = queue_prev(&page->listq);
+			if (queue_end(&__object->memq, __qe)) {
+				__qe = NULL;
+			}
+		}
+		__new_hint = (vm_page_t) __qe;
+		__object->memq_hint = __new_hint;
+	}
+	queue_remove(&__object->memq, page, vm_page_t, listq);
+}
+
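+/*
+ * place a pageable page at the tail (or head, if 'first' is TRUE) of the
+ * appropriate inactive queue: the anonymous queue for pages of internal
+ * objects, the regular inactive queue for file-backed pages; the matching
+ * queue counters are updated as well
+ */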
+void
+vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
+{
+	VM_PAGE_QUEUES_ASSERT(mem, 0);
+	assert(!mem->fictitious);
+	assert(!mem->laundry);
+	assert(!mem->pageout_queue);
+	vm_page_check_pageable_safe(mem);
+	if (mem->object->internal) {
+		if (first == TRUE)
+			queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+		else
+			queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+		vm_page_anonymous_count++;
+		vm_page_pageable_internal_count++;
+	} else {
+		if (first == TRUE)
+			queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+		else
+			queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+		vm_page_pageable_external_count++;
+	}
+	mem->inactive = TRUE;
+	vm_page_inactive_count++;
+	token_new_pagecount++;
+}
+
+/*
+ * Pages from special kernel objects shouldn't
+ * be placed on pageable queues.
+ */
+void
+vm_page_check_pageable_safe(vm_page_t page)
+{
+	if (page->object == kernel_object) {
+		panic("vm_page_check_pageable_safe: trying to add page " \
+			 "from kernel object (%p) to pageable queue", kernel_object);
+	}
+
+	if (page->object == compressor_object) {
+		panic("vm_page_check_pageable_safe: trying to add page " \
+			 "from compressor object (%p) to pageable queue", compressor_object);
+	}
+
+	if (page->object == vm_submap_object) {
+		panic("vm_page_check_pageable_safe: trying to add page " \
+			"from submap object (%p) to pageable queue", vm_submap_object);
+	}
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * wired page diagnose
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <libkern/OSKextLibPrivate.h>
+
+vm_allocation_site_t * 
+vm_allocation_sites[VM_KERN_MEMORY_COUNT];
+
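+/*
+ * Walk the current thread's kernel stack frames and look up the
+ * allocation site registered for the first return address that falls
+ * outside the kernel's own text, i.e. attribute the allocation to the
+ * calling kext.  Returns VM_KERN_MEMORY_NONE when no such caller is
+ * found within the stack bounds.
+ */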
+vm_tag_t 
+vm_tag_bt(void)
+{
+    uintptr_t* frameptr;
+    uintptr_t* frameptr_next;
+    uintptr_t retaddr;
+    uintptr_t kstackb, kstackt;
+    const vm_allocation_site_t * site;
+    thread_t cthread;
+    
+    cthread = current_thread();
+    if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;
+
+    kstackb = cthread->kernel_stack;
+    kstackt = kstackb + kernel_stack_size;
+
+    /* Load stack frame pointer (EBP on x86) into frameptr */
+    frameptr = __builtin_frame_address(0);
+    site = NULL;
+    while (frameptr != NULL) 
+    {
+	/* Verify thread stack bounds */
+	if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;
+
+	/* Next frame pointer is pointed to by the previous one */
+	frameptr_next = (uintptr_t*) *frameptr;
+
+	/* Pull return address from one spot above the frame pointer */
+	retaddr = *(frameptr + 1);
+
+	if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
+	{
+	    site = OSKextGetAllocationSiteForCaller(retaddr);
+	    break;
+	}
+
+	frameptr = frameptr_next;
+    }
+    return (site ? site->tag : VM_KERN_MEMORY_NONE);
+}
+
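+/*
+ * Bitmap of available dynamic tags: a set bit means the tag is free.
+ * Bits are assigned MSB-first within each 64-bit word, matching the
+ * __builtin_clzll() search below.
+ */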
+static uint64_t free_tag_bits[256/64];
+
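+/*
+ * Assign the lowest-numbered free dynamic tag to "site", falling back
+ * to the shared VM_KERN_MEMORY_ANY tag when the bitmap is exhausted,
+ * and record the site in vm_allocation_sites[].  Assumed to be called
+ * with vm_allocation_sites_lock held (hence "_locked").
+ */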
+void
+vm_tag_alloc_locked(vm_allocation_site_t * site)
+{
+    vm_tag_t tag;
+    uint64_t avail;
+    uint64_t idx;
+
+    if (site->tag) return;
+
+    idx = 0;
+    while (TRUE)
+    {
+	avail = free_tag_bits[idx];
+	if (avail)
+	{
+	    tag = __builtin_clzll(avail);
+	    avail &= ~(1ULL << (63 - tag));
+	    free_tag_bits[idx] = avail;
+	    tag += (idx << 6);
+	    break;
+	}
+	idx++;
+	if (idx >= (sizeof(free_tag_bits) / sizeof(free_tag_bits[0])))
+	{
+	     tag = VM_KERN_MEMORY_ANY;
+	     break;
+	}
+    }
+    site->tag = tag;
+    if (VM_KERN_MEMORY_ANY != tag)
+    {
+	assert(!vm_allocation_sites[tag]);
+	vm_allocation_sites[tag] = site;
+    }
+}
+
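+/*
+ * Return a dynamic tag to the free bitmap.  VM_KERN_MEMORY_ANY is the
+ * shared overflow tag and is never freed.
+ */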
+static void
+vm_tag_free_locked(vm_tag_t tag)
+{
+    uint64_t avail;
+    uint32_t idx;
+    uint64_t bit;
+
+    if (VM_KERN_MEMORY_ANY == tag) return;
+
+    idx = (tag >> 6);
+    avail = free_tag_bits[idx];
+    tag &= 63;
+    bit = (1ULL << (63 - tag));
+    assert(!(avail & bit));
+    free_tag_bits[idx] = (avail | bit);
+}
+
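+/*
+ * Mark every dynamic tag (VM_KERN_MEMORY_FIRST_DYNAMIC up to, but not
+ * including, VM_KERN_MEMORY_ANY) as free.
+ */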
+static void
+vm_tag_init(void)
+{
+    vm_tag_t tag;
+    for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
+    {
+        vm_tag_free_locked(tag);
+    }
+}
+
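+/*
+ * Resolve the tag for an allocation site.  Sites flagged VM_TAG_BT are
+ * first attributed to a kext by backtrace via vm_tag_bt(); otherwise a
+ * dynamic tag is lazily assigned under vm_allocation_sites_lock.
+ */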
+vm_tag_t
+vm_tag_alloc(vm_allocation_site_t * site)
+{
+    vm_tag_t tag;
+
+    if (VM_TAG_BT & site->flags)
+    {
+	tag = vm_tag_bt();
+	if (VM_KERN_MEMORY_NONE != tag) return (tag);
+    }
+
+    if (!site->tag) 
+    {
+	lck_spin_lock(&vm_allocation_sites_lock);
+	vm_tag_alloc_locked(site);
+	lck_spin_unlock(&vm_allocation_sites_lock);
+    }
+
+    return (site->tag);
+}
+
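+/*
+ * Accumulate an object's wired page count into the site entry selected
+ * by its wire_tag.  kernel_object is skipped here; its wired pages are
+ * attributed per map entry by the kernel_map walk in vm_page_diagnose().
+ */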
+static void 
+vm_page_count_object(mach_memory_info_t * sites, unsigned int __unused num_sites, vm_object_t object)
+{
+    if (!object->wired_page_count) return;
+    if (object != kernel_object)
+    {
+	assert(object->wire_tag < num_sites);
+	sites[object->wire_tag].size += ptoa_64(object->wired_page_count);
+    }
+}
+
+typedef void (*vm_page_iterate_proc)(mach_memory_info_t * sites, 
+				     unsigned int num_sites, vm_object_t object);
+
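+/*
+ * Apply "proc" to every object on one group of a purgeable queue.
+ */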
+static void 
+vm_page_iterate_purgeable_objects(mach_memory_info_t * sites, unsigned int num_sites,
+				  vm_page_iterate_proc proc, purgeable_q_t queue, 
+				  int group)
+{
+    vm_object_t object;
+
+    for (object = (vm_object_t) queue_first(&queue->objq[group]);
+	!queue_end(&queue->objq[group], (queue_entry_t) object);
+	object = (vm_object_t) queue_next(&object->objq))
+    {
+	proc(sites, num_sites, object);
+    }
+}
+
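+/*
+ * Apply "proc" to every object that may hold wired pages: the wired
+ * object list, the nonvolatile purgeable queue and all volatile
+ * purgeable queues, taking the appropriate lock around each walk.
+ */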
+static void 
+vm_page_iterate_objects(mach_memory_info_t * sites, unsigned int num_sites,
+			vm_page_iterate_proc proc)
+{
+    purgeable_q_t   volatile_q;
+    queue_head_t  * nonvolatile_q;
+    vm_object_t     object;
+    int             group;
+
+    lck_spin_lock(&vm_objects_wired_lock);
+    queue_iterate(&vm_objects_wired,
+		  object,
+		  vm_object_t,
+		  objq)
+    {
+	proc(sites, num_sites, object);
+    }
+    lck_spin_unlock(&vm_objects_wired_lock);
+
+    lck_mtx_lock(&vm_purgeable_queue_lock);
+    nonvolatile_q = &purgeable_nonvolatile_queue;
+    for (object = (vm_object_t) queue_first(nonvolatile_q);
+	 !queue_end(nonvolatile_q, (queue_entry_t) object);
+	 object = (vm_object_t) queue_next(&object->objq))
+    {
+	proc(sites, num_sites, object);
+    }
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+    vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, 0);
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+    {
+	vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
+    }
+
+    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+    {
+	vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
+    }
+    lck_mtx_unlock(&vm_purgeable_queue_lock);
+}
+
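+/*
+ * Post-process the per-tag totals gathered so far: label the static
+ * kernel tags, translate dynamic sites into kmod IDs or unslid kernel
+ * addresses, and release dynamic tags whose sites no longer account
+ * for any wired memory.  Returns the total size accounted across all
+ * sites.
+ */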
+static uint64_t
+process_account(mach_memory_info_t * sites, unsigned int __unused num_sites)
+{
+    uint64_t found;
+    unsigned int idx;
+    vm_allocation_site_t * site;
+
+    assert(num_sites >= VM_KERN_MEMORY_COUNT);
+    found = 0;
+    for (idx = 0; idx < VM_KERN_MEMORY_COUNT; idx++) 
+    {
+	found += sites[idx].size;
+	if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
+	{
+	    sites[idx].site   = idx;
+	    sites[idx].flags |= VM_KERN_SITE_TAG;
+	    if (VM_KERN_MEMORY_ZONE == idx) sites[idx].flags |= VM_KERN_SITE_HIDE;
+	    else                            sites[idx].flags |= VM_KERN_SITE_WIRED;
+	    continue;
+	}
+	lck_spin_lock(&vm_allocation_sites_lock);
+	if ((site = vm_allocation_sites[idx]))
+	{
+	    if (sites[idx].size)
+	    {
+		sites[idx].flags |= VM_KERN_SITE_WIRED;
+		if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags))
+		{
+		    sites[idx].site   = OSKextGetKmodIDForSite(site);
+		    sites[idx].flags |= VM_KERN_SITE_KMOD;
+		}
+		else
+		{
+		    sites[idx].site   = VM_KERNEL_UNSLIDE(site);
+		    sites[idx].flags |= VM_KERN_SITE_KERNEL;
+		}
+		site = NULL;
+	    }
+	    else
+	    {
+		vm_tag_free_locked(site->tag);
+	        site->tag = VM_KERN_MEMORY_NONE;
+	        vm_allocation_sites[idx] = NULL;
+		if (!(VM_TAG_UNLOAD & site->flags)) site = NULL;
+	    }
+	}
+	lck_spin_unlock(&vm_allocation_sites_lock);
+        if (site) OSKextFreeSite(site);
+    }
+    return (found);
+}
+
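+/*
+ * Fill "sites" with a per-tag breakdown of wired kernel memory: wired
+ * pages are counted per object via vm_page_iterate_objects(), global
+ * counters and the sizes of kernel_map, zone_map and kalloc_map are
+ * recorded in the counter slots after the tag entries, and wired pages
+ * resident in kernel_object are attributed to the owning map entry's
+ * alias tag by walking kernel_map (and one level of submaps).
+ */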
+kern_return_t 
+vm_page_diagnose(mach_memory_info_t * sites, unsigned int num_sites)
+{
+    enum             	   { kMaxKernelDepth = 1 };
+    vm_map_t           	     maps   [kMaxKernelDepth];
+    vm_map_entry_t     	     entries[kMaxKernelDepth];
+    vm_map_t           	     map;
+    vm_map_entry_t     	     entry;
+    vm_object_offset_t 	     offset;
+    vm_page_t          	     page;
+    int                	     stackIdx, count;
+    uint64_t	       	     wired_size;
+    uint64_t	       	     wired_managed_size;
+    uint64_t	       	     wired_reserved_size;
+    mach_memory_info_t     * counts;
+
+    bzero(sites, num_sites * sizeof(mach_memory_info_t));
+
+    vm_page_iterate_objects(sites, num_sites, &vm_page_count_object);
+
+    wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
+    wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
+    wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
+
+    assert(num_sites >= (VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT));
+    counts = &sites[VM_KERN_MEMORY_COUNT];
+
+#define SET_COUNT(xcount, xsize, xflags)			\
+    counts[xcount].site  = (xcount);			\
+    counts[xcount].size  = (xsize);			\
+    counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
+
+    SET_COUNT(VM_KERN_COUNT_MANAGED,		  ptoa_64(vm_page_pages),        0);
+    SET_COUNT(VM_KERN_COUNT_WIRED,		  wired_size,                    0);
+    SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED,	  wired_managed_size,            0);
+    SET_COUNT(VM_KERN_COUNT_RESERVED,	  	  wired_reserved_size, 		 VM_KERN_SITE_WIRED);
+    SET_COUNT(VM_KERN_COUNT_STOLEN,	          ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
+    SET_COUNT(VM_KERN_COUNT_LOPAGE,	          ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
+
+#define SET_MAP(xcount, xsize, xfree, xlargest)		\
+    counts[xcount].site    = (xcount);			\
+    counts[xcount].size    = (xsize);			\
+    counts[xcount].free    = (xfree);			\
+    counts[xcount].largest = (xlargest);		\
+    counts[xcount].flags   = VM_KERN_SITE_COUNTER;
+
+    vm_map_size_t map_size, map_free, map_largest;
+
+    vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
+
+    vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
+
+    vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
+    SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);
+
+    map = kernel_map;
+    stackIdx = 0;
+    while (map)
+    {
+	vm_map_lock(map);
+	for (entry = map->hdr.links.next; map; entry = entry->links.next)
+	{
+	    if (entry->is_sub_map)
+	    {
+	    	assert(stackIdx < kMaxKernelDepth);
+		maps[stackIdx] = map;
+		entries[stackIdx] = entry;
+		stackIdx++;
+		map = VME_SUBMAP(entry);
+		entry = NULL;
+		break;
+	    }
+	    if (VME_OBJECT(entry) == kernel_object)
+	    {
+		count = 0;
+		vm_object_lock(VME_OBJECT(entry));
+		for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
+		{
+			page = vm_page_lookup(VME_OBJECT(entry), offset);
+			if (page && VM_PAGE_WIRED(page)) count++;
+		}
+		vm_object_unlock(VME_OBJECT(entry));
+
+		if (count)
+		{
+		    assert(VME_ALIAS(entry) < num_sites);
+		    sites[VME_ALIAS(entry)].size += ptoa_64(count);
+		}
+	    }
+	    if (entry == vm_map_last_entry(map))
+	    {
+		vm_map_unlock(map);
+		if (!stackIdx) map = NULL;
+		else
+		{
+		    --stackIdx;
+		    map = maps[stackIdx];
+		    entry = entries[stackIdx];
+		}
+	    }
+	}
+    }
+
+    process_account(sites, num_sites);
+    
+    return (KERN_SUCCESS);
 }
-#endif	/* MACH_KDB */