xnu-6153.11.26: apple/xnu.git, osfmk/vm/vm_object.c
diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c
index 882c3c0776d78d781e1ab7a2566653ae86b58027..5b6250afc8d1d9277af3d8ed012b62ca8949c3ed 100644
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
@@ -80,7 +80,6 @@
 #include <kern/kern_types.h>
 #include <kern/assert.h>
 #include <kern/queue.h>
-#include <kern/xpr.h>
 #include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/host.h>
 #include <vm/vm_phantom_cache.h>
 #endif
 
+#if VM_OBJECT_ACCESS_TRACKING
+uint64_t vm_object_access_tracking_reads = 0;
+uint64_t vm_object_access_tracking_writes = 0;
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+
 boolean_t vm_object_collapse_compressor_allowed = TRUE;
 
 struct vm_counters vm_counters;
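
The VM_OBJECT_ACCESS_TRACKING block above adds two kernel-wide counters that aggregate read and write accesses observed on tracked objects; matching per-object fields (access_tracking_reads/writes) are added to the object template further down in this diff. A minimal user-space sketch of the same two-level counting pattern, assuming a hypothetical handle_fault() entry point and illustrative types (not xnu API):

    /* Sketch only: two-level access counters in the style of
     * VM_OBJECT_ACCESS_TRACKING; handle_fault() and struct tracked_object
     * are hypothetical stand-ins. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t tracking_reads;   /* cf. vm_object_access_tracking_reads */
    static _Atomic uint64_t tracking_writes;  /* cf. vm_object_access_tracking_writes */

    struct tracked_object {
        _Atomic uint64_t reads;
        _Atomic uint64_t writes;
    };

    static void handle_fault(struct tracked_object *obj, bool is_write)
    {
        _Atomic uint64_t *per_obj = is_write ? &obj->writes : &obj->reads;
        _Atomic uint64_t *global  = is_write ? &tracking_writes : &tracking_reads;

        atomic_fetch_add_explicit(per_obj, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(global, 1, memory_order_relaxed);
    }
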
@@ -119,8 +123,8 @@ vm_object_tracking_init(void)
        int vm_object_tracking;
 
        vm_object_tracking = 1;
-       PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking, 
-                          sizeof (vm_object_tracking));
+       PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
+           sizeof(vm_object_tracking));
 
        if (vm_object_tracking) {
                vm_object_tracking_btlog = btlog_create(
@@ -182,7 +186,7 @@ vm_object_tracking_init(void)
  *     that depend on the default memory manager are called
  *     "internal".  The "pager_created" field is provided to
  *     indicate whether these ports have ever been allocated.
- *     
+ *
  *     The kernel may also create virtual memory objects to
  *     hold changed pages after a copy-on-write operation.
  *     In this case, the virtual memory object (and its
@@ -207,41 +211,37 @@ vm_object_tracking_init(void)
  */
 
 /* Forward declarations for internal functions. */
-static kern_return_t   vm_object_terminate(
-                               vm_object_t     object);
+static kern_return_t    vm_object_terminate(
+       vm_object_t     object);
 
-extern void            vm_object_remove(
-                               vm_object_t     object);
+static kern_return_t    vm_object_copy_call(
+       vm_object_t             src_object,
+       vm_object_offset_t      src_offset,
+       vm_object_size_t        size,
+       vm_object_t             *_result_object);
 
-static kern_return_t   vm_object_copy_call(
-                               vm_object_t             src_object,
-                               vm_object_offset_t      src_offset,
-                               vm_object_size_t        size,
-                               vm_object_t             *_result_object);
+static void             vm_object_do_collapse(
+       vm_object_t     object,
+       vm_object_t     backing_object);
 
-static void            vm_object_do_collapse(
-                               vm_object_t     object,
-                               vm_object_t     backing_object);
+static void             vm_object_do_bypass(
+       vm_object_t     object,
+       vm_object_t     backing_object);
 
-static void            vm_object_do_bypass(
-                               vm_object_t     object,
-                               vm_object_t     backing_object);
+static void             vm_object_release_pager(
+       memory_object_t pager);
 
-static void            vm_object_release_pager(
-                               memory_object_t pager,
-                               boolean_t       hashed);
-
-static zone_t          vm_object_zone;         /* vm backing store zone */
+zone_t          vm_object_zone;         /* vm backing store zone */
 
 /*
  *     All wired-down kernel memory belongs to a single virtual
  *     memory object (kernel_object) to avoid wasting data structures.
  */
-static struct vm_object                        kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
-vm_object_t                            kernel_object;
+static struct vm_object                 kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+vm_object_t                             kernel_object;
 
-static struct vm_object                        compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
-vm_object_t                            compressor_object = &compressor_object_store;
+static struct vm_object                 compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+vm_object_t                             compressor_object = &compressor_object_store;
 
 /*
  *     The submap object is used as a placeholder for vm_map_submap
@@ -249,7 +249,7 @@ vm_object_t                         compressor_object = &compressor_object_store;
  *     is exported by the vm_map module.  The storage is declared
  *     here because it must be initialized here.
  */
-static struct vm_object                        vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+static struct vm_object                 vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
 
 /*
  *     Virtual memory objects are initialized from
@@ -259,150 +259,75 @@ static struct vm_object                  vm_submap_object_store __attribute__((aligned(VM_PACKE
  *     object structure, be sure to add initialization
  *     (see _vm_object_allocate()).
  */
-static struct vm_object                        vm_object_template;
+static struct vm_object                 vm_object_template;
 
 unsigned int vm_page_purged_wired = 0;
 unsigned int vm_page_purged_busy = 0;
 unsigned int vm_page_purged_others = 0;
 
-#if VM_OBJECT_CACHE
-/*
- *     Virtual memory objects that are not referenced by
- *     any address maps, but that are allowed to persist
- *     (an attribute specified by the associated memory manager),
- *     are kept in a queue (vm_object_cached_list).
- *
- *     When an object from this queue is referenced again,
- *     for example to make another address space mapping,
- *     it must be removed from the queue.  That is, the
- *     queue contains *only* objects with zero references.
- *
- *     The kernel may choose to terminate objects from this
- *     queue in order to reclaim storage.  The current policy
- *     is to permit a fixed maximum number of unreferenced
- *     objects (vm_object_cached_max).
- *
- *     A spin lock (accessed by routines
- *     vm_object_cache_{lock,lock_try,unlock}) governs the
- *     object cache.  It must be held when objects are
- *     added to or removed from the cache (in vm_object_terminate).
- *     The routines that acquire a reference to a virtual
- *     memory object based on one of the memory object ports
- *     must also lock the cache.
- *
- *     Ideally, the object cache should be more isolated
- *     from the reference mechanism, so that the lock need
- *     not be held to make simple references.
- */
-static vm_object_t     vm_object_cache_trim(
-                               boolean_t called_from_vm_object_deallocate);
-
-static void            vm_object_deactivate_all_pages(
-                               vm_object_t     object);
-
-static int             vm_object_cached_high;  /* highest # cached objects */
-static int             vm_object_cached_max = 512;     /* may be patched*/
-
-#define vm_object_cache_lock()         \
-               lck_mtx_lock(&vm_object_cached_lock_data)
-#define vm_object_cache_lock_try()             \
-               lck_mtx_try_lock(&vm_object_cached_lock_data)
-
-#endif /* VM_OBJECT_CACHE */
-
-static queue_head_t    vm_object_cached_list;
-static uint32_t                vm_object_cache_pages_freed = 0;
-static uint32_t                vm_object_cache_pages_moved = 0;
-static uint32_t                vm_object_cache_pages_skipped = 0;
-static uint32_t                vm_object_cache_adds = 0;
-static uint32_t                vm_object_cached_count = 0;
-static lck_mtx_t       vm_object_cached_lock_data;
-static lck_mtx_ext_t   vm_object_cached_lock_data_ext;
-
-static uint32_t                vm_object_page_grab_failed = 0;
-static uint32_t                vm_object_page_grab_skipped = 0;
-static uint32_t                vm_object_page_grab_returned = 0;
-static uint32_t                vm_object_page_grab_pmapped = 0;
-static uint32_t                vm_object_page_grab_reactivations = 0;
-
-#define vm_object_cache_lock_spin()            \
-               lck_mtx_lock_spin(&vm_object_cached_lock_data)
-#define vm_object_cache_unlock()       \
-               lck_mtx_unlock(&vm_object_cached_lock_data)
-
-static void    vm_object_cache_remove_locked(vm_object_t);
-
-
-#define        VM_OBJECT_HASH_COUNT            1024
-#define        VM_OBJECT_HASH_LOCK_COUNT       512
-
-static lck_mtx_t       vm_object_hashed_lock_data[VM_OBJECT_HASH_LOCK_COUNT];
-static lck_mtx_ext_t   vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_LOCK_COUNT];
-
-static queue_head_t    vm_object_hashtable[VM_OBJECT_HASH_COUNT];
-static struct zone     *vm_object_hash_zone;
-
-struct vm_object_hash_entry {
-       queue_chain_t           hash_link;      /* hash chain link */
-       memory_object_t pager;          /* pager we represent */
-       vm_object_t             object;         /* corresponding object */
-       boolean_t               waiting;        /* someone waiting for
-                                                * termination */
-};
+static queue_head_t     vm_object_cached_list;
+static uint32_t         vm_object_cache_pages_freed = 0;
+static uint32_t         vm_object_cache_pages_moved = 0;
+static uint32_t         vm_object_cache_pages_skipped = 0;
+static uint32_t         vm_object_cache_adds = 0;
+static uint32_t         vm_object_cached_count = 0;
+static lck_mtx_t        vm_object_cached_lock_data;
+static lck_mtx_ext_t    vm_object_cached_lock_data_ext;
 
-typedef struct vm_object_hash_entry    *vm_object_hash_entry_t;
-#define VM_OBJECT_HASH_ENTRY_NULL      ((vm_object_hash_entry_t) 0)
+static uint32_t         vm_object_page_grab_failed = 0;
+static uint32_t         vm_object_page_grab_skipped = 0;
+static uint32_t         vm_object_page_grab_returned = 0;
+static uint32_t         vm_object_page_grab_pmapped = 0;
+static uint32_t         vm_object_page_grab_reactivations = 0;
 
-#define VM_OBJECT_HASH_SHIFT   5
-#define vm_object_hash(pager) \
-       ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT))
+#define vm_object_cache_lock_spin()             \
+               lck_mtx_lock_spin(&vm_object_cached_lock_data)
+#define vm_object_cache_unlock()        \
+               lck_mtx_unlock(&vm_object_cached_lock_data)
 
-#define vm_object_lock_hash(pager) \
-       ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_LOCK_COUNT))
+static void     vm_object_cache_remove_locked(vm_object_t);
 
-void vm_object_hash_entry_free(
-       vm_object_hash_entry_t  entry);
 
 static void vm_object_reap(vm_object_t object);
 static void vm_object_reap_async(vm_object_t object);
 static void vm_object_reaper_thread(void);
 
-static lck_mtx_t       vm_object_reaper_lock_data;
-static lck_mtx_ext_t   vm_object_reaper_lock_data_ext;
+static lck_mtx_t        vm_object_reaper_lock_data;
+static lck_mtx_ext_t    vm_object_reaper_lock_data_ext;
 
 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
 unsigned int vm_object_reap_count = 0;
 unsigned int vm_object_reap_count_async = 0;
 
-#define vm_object_reaper_lock()                \
-               lck_mtx_lock(&vm_object_reaper_lock_data)
-#define vm_object_reaper_lock_spin()           \
-               lck_mtx_lock_spin(&vm_object_reaper_lock_data)
-#define vm_object_reaper_unlock()      \
-               lck_mtx_unlock(&vm_object_reaper_lock_data)
+#define vm_object_reaper_lock()         \
+               lck_mtx_lock(&vm_object_reaper_lock_data)
+#define vm_object_reaper_lock_spin()            \
+               lck_mtx_lock_spin(&vm_object_reaper_lock_data)
+#define vm_object_reaper_unlock()       \
+               lck_mtx_unlock(&vm_object_reaper_lock_data)
 
 #if CONFIG_IOSCHED
 /* I/O Re-prioritization request list */
-queue_head_t   io_reprioritize_list;
-lck_spin_t     io_reprioritize_list_lock;
+queue_head_t    io_reprioritize_list;
+lck_spin_t      io_reprioritize_list_lock;
 
-#define IO_REPRIORITIZE_LIST_LOCK()    \
-               lck_spin_lock(&io_reprioritize_list_lock)
-#define IO_REPRIORITIZE_LIST_UNLOCK()  \
-               lck_spin_unlock(&io_reprioritize_list_lock)
+#define IO_REPRIORITIZE_LIST_LOCK()     \
+               lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp)
+#define IO_REPRIORITIZE_LIST_UNLOCK()   \
+               lck_spin_unlock(&io_reprioritize_list_lock)
 
-#define MAX_IO_REPRIORITIZE_REQS       8192
-zone_t                 io_reprioritize_req_zone;
+#define MAX_IO_REPRIORITIZE_REQS        8192
+zone_t          io_reprioritize_req_zone;
 
 /* I/O Re-prioritization thread */
 int io_reprioritize_wakeup = 0;
 static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);
 
-#define IO_REPRIO_THREAD_WAKEUP()      thread_wakeup((event_t)&io_reprioritize_wakeup)
-#define IO_REPRIO_THREAD_CONTINUATION()                                \
-{                                                              \
-       assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);     \
-       thread_block(io_reprioritize_thread);                   \
+#define IO_REPRIO_THREAD_WAKEUP()       thread_wakeup((event_t)&io_reprioritize_wakeup)
+#define IO_REPRIO_THREAD_CONTINUATION()                                 \
+{                                                               \
+       assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);     \
+       thread_block(io_reprioritize_thread);                   \
 }
 
 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
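
IO_REPRIO_THREAD_WAKEUP() and IO_REPRIO_THREAD_CONTINUATION() pair thread_wakeup() with assert_wait()/thread_block(), so the reprioritization thread parks on the io_reprioritize_wakeup event and restarts itself as a continuation rather than keeping a stack while idle. A rough user-space analogue of the wakeup protocol, with a pthread condition variable standing in for the Mach event (pthreads do keep the blocked thread's stack, unlike a continuation):

    /* Rough analogue of the wakeup/continuation pair; names illustrative. */
    #include <pthread.h>

    static pthread_mutex_t reprio_lock   = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  reprio_wakeup = PTHREAD_COND_INITIALIZER;
    static int             reprio_pending;

    static void *reprioritize_thread(void *arg)
    {
        (void)arg;
        for (;;) {
            pthread_mutex_lock(&reprio_lock);
            while (!reprio_pending) {           /* IO_REPRIO_THREAD_CONTINUATION() */
                pthread_cond_wait(&reprio_wakeup, &reprio_lock);
            }
            reprio_pending = 0;
            pthread_mutex_unlock(&reprio_lock);
            /* ... drain the io_reprioritize_list here ... */
        }
        return NULL;
    }

    static void reprio_thread_wakeup(void)      /* IO_REPRIO_THREAD_WAKEUP() */
    {
        pthread_mutex_lock(&reprio_lock);
        reprio_pending = 1;
        pthread_cond_signal(&reprio_wakeup);
        pthread_mutex_unlock(&reprio_lock);
    }
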
@@ -416,109 +341,6 @@ void vm_decmp_upl_reprioritize(upl_t, int);
 #endif
 
 
-static lck_mtx_t *
-vm_object_hash_lock_spin(
-       memory_object_t pager)
-{
-       int     index;
-
-       index = vm_object_lock_hash(pager);
-
-       lck_mtx_lock_spin(&vm_object_hashed_lock_data[index]);
-
-       return (&vm_object_hashed_lock_data[index]);
-}
-
-static void
-vm_object_hash_unlock(lck_mtx_t *lck)
-{
-       lck_mtx_unlock(lck);
-}
-
-
-/*
- *     vm_object_hash_lookup looks up a pager in the hashtable
- *     and returns the corresponding entry, with optional removal.
- */
-static vm_object_hash_entry_t
-vm_object_hash_lookup(
-       memory_object_t pager,
-       boolean_t       remove_entry)
-{
-       queue_t                 bucket;
-       vm_object_hash_entry_t  entry;
-
-       bucket = &vm_object_hashtable[vm_object_hash(pager)];
-
-       entry = (vm_object_hash_entry_t)queue_first(bucket);
-       while (!queue_end(bucket, (queue_entry_t)entry)) {
-               if (entry->pager == pager) {
-                       if (remove_entry) {
-                               queue_remove(bucket, entry,
-                                            vm_object_hash_entry_t, hash_link);
-                       }
-                       return(entry);
-               }
-               entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
-       }
-       return(VM_OBJECT_HASH_ENTRY_NULL);
-}
-
-/*
- *     vm_object_hash_enter enters the specified
- *     pager / cache object association in the hashtable.
- */
-
-static void
-vm_object_hash_insert(
-       vm_object_hash_entry_t  entry,
-       vm_object_t             object)
-{
-       queue_t         bucket;
-
-       assert(vm_object_hash_lookup(entry->pager, FALSE) == NULL);
-
-       bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];
-
-       queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
-
-       if (object->hashed) {
-               /*
-                * "hashed" was pre-set on this (new) object to avoid
-                * locking issues in vm_object_enter() (can't attempt to
-                * grab the object lock while holding the hash lock as
-                * a spinlock), so no need to set it here (and no need to
-                * hold the object's lock).
-                */
-       } else {
-               vm_object_lock_assert_exclusive(object);
-               object->hashed = TRUE;
-       }
-
-       entry->object = object;
-}
-
-static vm_object_hash_entry_t
-vm_object_hash_entry_alloc(
-       memory_object_t pager)
-{
-       vm_object_hash_entry_t  entry;
-
-       entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
-       entry->pager = pager;
-       entry->object = VM_OBJECT_NULL;
-       entry->waiting = FALSE;
-
-       return(entry);
-}
-
-void
-vm_object_hash_entry_free(
-       vm_object_hash_entry_t  entry)
-{
-       zfree(vm_object_hash_zone, entry);
-}
-
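
The functions deleted above were the last users of the pager-to-object hash: 1024 buckets indexed by (((uintptr_t)pager) >> 5) % 1024, guarded by 512 bucket locks, with entries pairing a memory_object_t and its vm_object_t. A minimal sketch of that lookup scheme in plain C, with ordinary singly linked lists in place of the kernel queue and lock primitives:

    /* Minimal sketch of the deleted pager hash (plain C, no kernel queues). */
    #include <stddef.h>
    #include <stdint.h>

    #define HASH_COUNT  1024    /* VM_OBJECT_HASH_COUNT */
    #define HASH_SHIFT  5       /* VM_OBJECT_HASH_SHIFT */

    struct hash_entry {
        struct hash_entry *next;
        void              *pager;   /* stands in for memory_object_t */
        void              *object;  /* stands in for vm_object_t */
    };

    static struct hash_entry *hashtable[HASH_COUNT];

    static unsigned pager_hash(void *pager)
    {
        return (unsigned)((((uintptr_t)pager) >> HASH_SHIFT) % HASH_COUNT);
    }

    static struct hash_entry *hash_lookup(void *pager)
    {
        struct hash_entry *e;

        for (e = hashtable[pager_hash(pager)]; e != NULL; e = e->next) {
            if (e->pager == pager) {
                return e;
            }
        }
        return NULL;    /* VM_OBJECT_HASH_ENTRY_NULL */
    }
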
 /*
  *     vm_object_allocate:
  *
@@ -527,16 +349,11 @@ vm_object_hash_entry_free(
 
 __private_extern__ void
 _vm_object_allocate(
-       vm_object_size_t        size,
-       vm_object_t             object)
+       vm_object_size_t        size,
+       vm_object_t             object)
 {
-       XPR(XPR_VM_OBJECT,
-               "vm_object_allocate, object 0x%X size 0x%X\n",
-               object, size, 0,0,0);
-
        *object = vm_object_template;
        vm_page_queue_init(&object->memq);
-       queue_init(&object->msr_q);
 #if UPL_DEBUG || CONFIG_IOSCHED
        queue_init(&object->uplq);
 #endif
@@ -545,42 +362,47 @@ _vm_object_allocate(
 
 #if VM_OBJECT_TRACKING_OP_CREATED
        if (vm_object_tracking_inited) {
-               void    *bt[VM_OBJECT_TRACKING_BTDEPTH];
-               int     numsaved = 0;
+               void    *bt[VM_OBJECT_TRACKING_BTDEPTH];
+               int     numsaved = 0;
 
                numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
                btlog_add_entry(vm_object_tracking_btlog,
-                               object,
-                               VM_OBJECT_TRACKING_OP_CREATED,
-                               bt,
-                               numsaved);
+                   object,
+                   VM_OBJECT_TRACKING_OP_CREATED,
+                   bt,
+                   numsaved);
        }
 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
 }
 
 __private_extern__ vm_object_t
 vm_object_allocate(
-       vm_object_size_t        size)
+       vm_object_size_t        size)
 {
        vm_object_t object;
 
        object = (vm_object_t) zalloc(vm_object_zone);
-       
+
 //     dbgLog(object, size, 0, 2);                     /* (TEST/DEBUG) */
 
-       if (object != VM_OBJECT_NULL)
+       if (object != VM_OBJECT_NULL) {
                _vm_object_allocate(size, object);
+       }
 
        return object;
 }
 
 
-lck_grp_t              vm_object_lck_grp;
-lck_grp_t              vm_object_cache_lck_grp;
-lck_grp_attr_t         vm_object_lck_grp_attr;
-lck_attr_t             vm_object_lck_attr;
-lck_attr_t             kernel_object_lck_attr;
-lck_attr_t             compressor_object_lck_attr;
+lck_grp_t               vm_object_lck_grp;
+lck_grp_t               vm_object_cache_lck_grp;
+lck_grp_attr_t          vm_object_lck_grp_attr;
+lck_attr_t              vm_object_lck_attr;
+lck_attr_t              kernel_object_lck_attr;
+lck_attr_t              compressor_object_lck_attr;
+
+extern void vm_named_entry_init(void);
+
+int workaround_41447923 = 0;
 
 /*
  *     vm_object_bootstrap:
@@ -590,50 +412,35 @@ lck_attr_t                compressor_object_lck_attr;
 __private_extern__ void
 vm_object_bootstrap(void)
 {
-       int     i;
-       vm_size_t       vm_object_size;
+       vm_size_t       vm_object_size;
+
+       assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));
 
-       vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);
+       vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT - 1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);
 
        vm_object_zone = zinit(vm_object_size,
-                              round_page(512*1024),
-                              round_page(12*1024),
-                              "vm objects");
+           round_page(512 * 1024),
+           round_page(12 * 1024),
+           "vm objects");
        zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
        zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);
+       zone_change(vm_object_zone, Z_ALIGNMENT_REQUIRED, TRUE);
 
        vm_object_init_lck_grp();
 
        queue_init(&vm_object_cached_list);
 
        lck_mtx_init_ext(&vm_object_cached_lock_data,
-               &vm_object_cached_lock_data_ext,
-               &vm_object_cache_lck_grp,
-               &vm_object_lck_attr);
+           &vm_object_cached_lock_data_ext,
+           &vm_object_cache_lck_grp,
+           &vm_object_lck_attr);
 
        queue_init(&vm_object_reaper_queue);
 
-       for (i = 0; i < VM_OBJECT_HASH_LOCK_COUNT; i++) {
-               lck_mtx_init_ext(&vm_object_hashed_lock_data[i],
-                                &vm_object_hashed_lock_data_ext[i],
-                                &vm_object_lck_grp,
-                                &vm_object_lck_attr);
-       }
        lck_mtx_init_ext(&vm_object_reaper_lock_data,
-               &vm_object_reaper_lock_data_ext,
-               &vm_object_lck_grp,
-               &vm_object_lck_attr);
-
-       vm_object_hash_zone =
-                       zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
-                             round_page(512*1024),
-                             round_page(12*1024),
-                             "vm object hash entries");
-       zone_change(vm_object_hash_zone, Z_CALLERACCT, FALSE);
-       zone_change(vm_object_hash_zone, Z_NOENCRYPT, TRUE);
-
-       for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
-               queue_init(&vm_object_hashtable[i]);
+           &vm_object_reaper_lock_data_ext,
+           &vm_object_lck_grp,
+           &vm_object_lck_attr);
 
 
        /*
@@ -641,7 +448,6 @@ vm_object_bootstrap(void)
         */
 
        /* memq; Lock; init after allocation */
-       
 
        vm_object_template.memq.prev = 0;
        vm_object_template.memq.next = 0;
@@ -661,9 +467,9 @@ vm_object_bootstrap(void)
        vm_object_template.vo_size = 0;
        vm_object_template.memq_hint = VM_PAGE_NULL;
        vm_object_template.ref_count = 1;
-#if    TASK_SWAPPER
+#if     TASK_SWAPPER
        vm_object_template.res_count = 1;
-#endif /* TASK_SWAPPER */
+#endif  /* TASK_SWAPPER */
        vm_object_template.resident_page_count = 0;
        vm_object_template.wired_page_count = 0;
        vm_object_template.reusable_page_count = 0;
@@ -688,14 +494,13 @@ vm_object_bootstrap(void)
        vm_object_template.pager_trusted = FALSE;
        vm_object_template.can_persist = FALSE;
        vm_object_template.internal = TRUE;
-       vm_object_template.temporary = TRUE;
        vm_object_template.private = FALSE;
        vm_object_template.pageout = FALSE;
        vm_object_template.alive = TRUE;
        vm_object_template.purgable = VM_PURGABLE_DENY;
        vm_object_template.purgeable_when_ripe = FALSE;
+       vm_object_template.purgeable_only_by_kernel = FALSE;
        vm_object_template.shadowed = FALSE;
-       vm_object_template.advisory_pageout = FALSE;
        vm_object_template.true_share = FALSE;
        vm_object_template.terminating = FALSE;
        vm_object_template.named = FALSE;
@@ -706,9 +511,7 @@ vm_object_bootstrap(void)
 
        vm_object_template.cached_list.prev = NULL;
        vm_object_template.cached_list.next = NULL;
-       vm_object_template.msr_q.prev = NULL;
-       vm_object_template.msr_q.next = NULL;
-       
+
        vm_object_template.last_alloc = (vm_object_offset_t) 0;
        vm_object_template.sequential = (vm_object_offset_t) 0;
        vm_object_template.pages_created = 0;
@@ -718,16 +521,12 @@ vm_object_bootstrap(void)
        vm_object_template.phantom_object_id = 0;
 #endif
        vm_object_template.cow_hint = ~(vm_offset_t)0;
-#if    MACH_ASSERT
-       vm_object_template.paging_object = VM_OBJECT_NULL;
-#endif /* MACH_ASSERT */
 
        /* cache bitfields */
        vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
        vm_object_template.set_cache_attr = FALSE;
-       vm_object_template.object_slid = FALSE;
+       vm_object_template.object_is_shared_cache = FALSE;
        vm_object_template.code_signed = FALSE;
-       vm_object_template.hashed = FALSE;
        vm_object_template.transposed = FALSE;
        vm_object_template.mapping_in_progress = FALSE;
        vm_object_template.phantom_isssd = FALSE;
@@ -735,18 +534,21 @@ vm_object_bootstrap(void)
        vm_object_template.volatile_fault = FALSE;
        vm_object_template.all_reusable = FALSE;
        vm_object_template.blocked_access = FALSE;
-       vm_object_template.__object2_unused_bits = 0;
+       vm_object_template.vo_ledger_tag = VM_LEDGER_TAG_NONE;
+       vm_object_template.vo_no_footprint = FALSE;
 #if CONFIG_IOSCHED || UPL_DEBUG
        vm_object_template.uplq.prev = NULL;
        vm_object_template.uplq.next = NULL;
 #endif /* UPL_DEBUG */
 #ifdef VM_PIP_DEBUG
        bzero(&vm_object_template.pip_holders,
-             sizeof (vm_object_template.pip_holders));
+           sizeof(vm_object_template.pip_holders));
 #endif /* VM_PIP_DEBUG */
 
        vm_object_template.objq.next = NULL;
        vm_object_template.objq.prev = NULL;
+       vm_object_template.task_objq.next = NULL;
+       vm_object_template.task_objq.prev = NULL;
 
        vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
        vm_object_template.purgeable_queue_group = 0;
@@ -754,6 +556,10 @@ vm_object_bootstrap(void)
        vm_object_template.vo_cache_ts = 0;
 
        vm_object_template.wire_tag = VM_KERN_MEMORY_NONE;
+#if !VM_TAG_ACTIVE_UPDATE
+       vm_object_template.wired_objq.next = NULL;
+       vm_object_template.wired_objq.prev = NULL;
+#endif /* ! VM_TAG_ACTIVE_UPDATE */
 
        vm_object_template.io_tracking = FALSE;
 
@@ -763,13 +569,19 @@ vm_object_bootstrap(void)
 #else /* CONFIG_SECLUDED_MEMORY */
        vm_object_template.__object3_unused_bits = 0;
 #endif /* CONFIG_SECLUDED_MEMORY */
-       
+
+#if VM_OBJECT_ACCESS_TRACKING
+       vm_object_template.access_tracking = FALSE;
+       vm_object_template.access_tracking_reads = 0;
+       vm_object_template.access_tracking_writes = 0;
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+
 #if DEBUG
        bzero(&vm_object_template.purgeable_owner_bt[0],
-             sizeof (vm_object_template.purgeable_owner_bt));
+           sizeof(vm_object_template.purgeable_owner_bt));
        vm_object_template.vo_purgeable_volatilizer = NULL;
        bzero(&vm_object_template.purgeable_volatilizer_bt[0],
-             sizeof (vm_object_template.purgeable_volatilizer_bt));
+           sizeof(vm_object_template.purgeable_volatilizer_bt));
 #endif /* DEBUG */
 
        /*
@@ -779,17 +591,18 @@ vm_object_bootstrap(void)
        kernel_object = &kernel_object_store;
 
 /*
- *     Note that in the following size specifications, we need to add 1 because 
+ *     Note that in the following size specifications, we need to add 1 because
  *     VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
  */
 
        _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
-                           kernel_object);
+           kernel_object);
 
        _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
-                           compressor_object);
+           compressor_object);
        kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
        compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+       kernel_object->no_tag_update = TRUE;
 
        /*
         *      Initialize the "submap object".  Make it as large as the
@@ -798,7 +611,7 @@ vm_object_bootstrap(void)
 
        vm_submap_object = &vm_submap_object_store;
        _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
-                           vm_submap_object);
+           vm_submap_object);
        vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
 
        /*
@@ -807,46 +620,54 @@ vm_object_bootstrap(void)
         * non-zone memory.
         */
        vm_object_reference(vm_submap_object);
+
+       vm_named_entry_init();
+
+       PE_parse_boot_argn("workaround_41447923", &workaround_41447923,
+           sizeof(workaround_41447923));
 }
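
Both boot-args read in vm_object_bootstrap() (vm_object_tracking earlier, workaround_41447923 here) rely on PE_parse_boot_argn() leaving the variable untouched when the argument is absent, so the C initializer doubles as the default. A user-space analogue of that default-preserving idiom, using getenv()/strtol() in place of the boot-args string (names illustrative):

    /* User-space analogue of the PE_parse_boot_argn() default-preserving idiom. */
    #include <stdlib.h>

    static void parse_int_arg(const char *name, int *value)
    {
        const char *s = getenv(name);   /* stands in for the boot-args string */
        if (s != NULL) {
            *value = (int)strtol(s, NULL, 0);
        }                               /* absent: *value keeps its default */
    }

    int workaround_41447923 = 0;        /* default when the boot-arg is missing */
    /* parse_int_arg("workaround_41447923", &workaround_41447923); */
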
 
 #if CONFIG_IOSCHED
 void
 vm_io_reprioritize_init(void)
 {
-       kern_return_t   result;
-       thread_t        thread = THREAD_NULL;
+       kern_return_t   result;
+       thread_t        thread = THREAD_NULL;
 
        /* Initialize the I/O reprioritization subsystem */
-        lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr);
-        queue_init(&io_reprioritize_list);
+       lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr);
+       queue_init(&io_reprioritize_list);
 
        io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req),
-                                        MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req),
-                                        4096, "io_reprioritize_req");  
+           MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req),
+           4096, "io_reprioritize_req");
+       zone_change(io_reprioritize_req_zone, Z_COLLECT, FALSE);
 
        result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
-        if (result == KERN_SUCCESS) {
-                thread_deallocate(thread);
-        } else {
-                panic("Could not create io_reprioritize_thread");
-        }
+       if (result == KERN_SUCCESS) {
+               thread_set_thread_name(thread, "VM_io_reprioritize_thread");
+               thread_deallocate(thread);
+       } else {
+               panic("Could not create io_reprioritize_thread");
+       }
 }
 #endif
 
 void
 vm_object_reaper_init(void)
 {
-       kern_return_t   kr;
-       thread_t        thread;
+       kern_return_t   kr;
+       thread_t        thread;
 
        kr = kernel_thread_start_priority(
                (thread_continue_t) vm_object_reaper_thread,
                NULL,
-               BASEPRI_PREEMPT - 1,
+               BASEPRI_VM,
                &thread);
        if (kr != KERN_SUCCESS) {
                panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
        }
+       thread_set_thread_name(thread, "VM_object_reaper_thread");
        thread_deallocate(thread);
 }
 
@@ -875,12 +696,6 @@ vm_object_init_lck_grp(void)
        lck_attr_cleardebug(&compressor_object_lck_attr);
 }
 
-#if VM_OBJECT_CACHE
-#define        MIGHT_NOT_CACHE_SHADOWS         1
-#if    MIGHT_NOT_CACHE_SHADOWS
-static int cache_shadows = TRUE;
-#endif /* MIGHT_NOT_CACHE_SHADOWS */
-#endif
 
 /*
  *     vm_object_deallocate:
@@ -899,19 +714,16 @@ unsigned long vm_object_deallocate_shared_swap_failures = 0;
 
 __private_extern__ void
 vm_object_deallocate(
-       vm_object_t     object)
+       vm_object_t     object)
 {
-#if VM_OBJECT_CACHE
-       boolean_t       retry_cache_trim = FALSE;
-       uint32_t        try_failed_count = 0;
-#endif
-       vm_object_t     shadow = VM_OBJECT_NULL;
-       
+       vm_object_t     shadow = VM_OBJECT_NULL;
+
 //     if(object)dbgLog(object, object->ref_count, object->can_persist, 3);    /* (TEST/DEBUG) */
 //     else dbgLog(object, 0, 0, 3);   /* (TEST/DEBUG) */
 
-       if (object == VM_OBJECT_NULL)
-               return;
+       if (object == VM_OBJECT_NULL) {
+               return;
+       }
 
        if (object == kernel_object || object == compressor_object) {
                vm_object_lock_shared(object);
@@ -919,10 +731,11 @@ vm_object_deallocate(
                OSAddAtomic(-1, &object->ref_count);
 
                if (object->ref_count == 0) {
-                       if (object == kernel_object)
+                       if (object == kernel_object) {
                                panic("vm_object_deallocate: losing kernel_object\n");
-                       else
+                       } else {
                                panic("vm_object_deallocate: losing compressor_object\n");
+                       }
                }
                vm_object_unlock(object);
                return;
@@ -936,18 +749,18 @@ vm_object_deallocate(
                 * we'll need to call memory_object_last_unmap().
                 */
        } else if (object->ref_count == 2 &&
-                  object->internal &&
-                  object->shadow != VM_OBJECT_NULL) {
+           object->internal &&
+           object->shadow != VM_OBJECT_NULL) {
                /*
                 * This internal object's reference count is about to
                 * drop from 2 to 1 and it has a shadow object:
                 * we'll want to try and collapse this object with its
                 * shadow.
                 */
-       } else if (object->ref_count >= 2) { 
-               UInt32          original_ref_count;
-               volatile UInt32 *ref_count_p;
-               Boolean         atomic_swap;
+       } else if (object->ref_count >= 2) {
+               UInt32          original_ref_count;
+               volatile UInt32 *ref_count_p;
+               Boolean         atomic_swap;
 
                /*
                 * The object currently looks like it is not being
@@ -958,7 +771,7 @@ vm_object_deallocate(
                 * Lock the object "shared" to make sure we don't race with
                 * anyone holding it "exclusive".
                 */
-               vm_object_lock_shared(object);
+               vm_object_lock_shared(object);
                ref_count_p = (volatile UInt32 *) &object->ref_count;
                original_ref_count = object->ref_count;
                /*
@@ -970,11 +783,11 @@ vm_object_deallocate(
                        /* need to take slow path for m_o_last_unmap() */
                        atomic_swap = FALSE;
                } else if (original_ref_count == 2 &&
-                          object->internal &&
-                          object->shadow != VM_OBJECT_NULL) {
+                   object->internal &&
+                   object->shadow != VM_OBJECT_NULL) {
                        /* need to take slow path for vm_object_collapse() */
                        atomic_swap = FALSE;
-               } else if (original_ref_count < 2) { 
+               } else if (original_ref_count < 2) {
                        /* need to take slow path for vm_object_terminate() */
                        atomic_swap = FALSE;
                } else {
@@ -988,7 +801,7 @@ vm_object_deallocate(
                                /* fall back to the slow path... */
                        }
                }
-                       
+
                vm_object_unlock(object);
 
                if (atomic_swap) {
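
The fast path above is a lock-free reference drop: ref_count is sampled under a shared object lock, the shortcut is refused whenever the drop could have side effects (a named object going from 2 to 1 needs memory_object_last_unmap(), an internal object with a shadow at 2 wants a collapse, and below 2 means termination), and otherwise a single compare-and-swap replaces original_ref_count with original_ref_count - 1, falling back to the slow path on contention. The same shape in portable C11, with the side-effect tests folded into a stand-in needs_slow_path():

    /* Sketch of the CAS release fast path; needs_slow_path() is a stand-in
     * for the named/shadow/terminate checks, not the real logic. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct obj { _Atomic uint32_t ref_count; };

    static bool needs_slow_path(struct obj *o, uint32_t refs)
    {
        (void)o;
        return refs < 2;    /* stand-in: real code also checks named/shadow */
    }

    static void release_slow(struct obj *o)
    {
        (void)o;            /* lock exclusively; unmap, collapse, or terminate */
    }

    static void obj_release(struct obj *o)
    {
        uint32_t old = atomic_load(&o->ref_count);

        if (!needs_slow_path(o, old) &&
            atomic_compare_exchange_strong(&o->ref_count, &old, old - 1)) {
            return;         /* uncontended drop with no side effects */
        }
        release_slow(o);
    }
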
@@ -1008,7 +821,6 @@ vm_object_deallocate(
        }
 
        while (object != VM_OBJECT_NULL) {
-
                vm_object_lock(object);
 
                assert(object->ref_count > 0);
@@ -1018,8 +830,8 @@ vm_object_deallocate(
                 *      that reference would remain, inform the pager
                 *      about the last "mapping" reference going away.
                 */
-               if ((object->ref_count == 2)  && (object->named)) {
-                       memory_object_t pager = object->pager;
+               if ((object->ref_count == 2) && (object->named)) {
+                       memory_object_t pager = object->pager;
 
                        /* Notify the Pager that there are no */
                        /* more mappers for this object */
@@ -1074,14 +886,7 @@ vm_object_deallocate(
                                 */
                                vm_object_collapse(object, 0, FALSE);
                        }
-                       vm_object_unlock(object); 
-#if VM_OBJECT_CACHE
-                       if (retry_cache_trim &&
-                           ((object = vm_object_cache_trim(TRUE)) !=
-                            VM_OBJECT_NULL)) {
-                               continue;
-                       }
-#endif
+                       vm_object_unlock(object);
                        return;
                }
 
@@ -1089,183 +894,49 @@ vm_object_deallocate(
                 *      We have to wait for initialization
                 *      before destroying or caching the object.
                 */
-               
-               if (object->pager_created && ! object->pager_initialized) {
-                       assert(! object->can_persist);
+
+               if (object->pager_created && !object->pager_initialized) {
+                       assert(!object->can_persist);
                        vm_object_assert_wait(object,
-                                             VM_OBJECT_EVENT_INITIALIZED,
-                                             THREAD_UNINT);
+                           VM_OBJECT_EVENT_INITIALIZED,
+                           THREAD_UNINT);
                        vm_object_unlock(object);
 
                        thread_block(THREAD_CONTINUE_NULL);
                        continue;
                }
 
-#if VM_OBJECT_CACHE
+               VM_OBJ_RES_DECR(object);        /* XXX ? */
                /*
-                *      If this object can persist, then enter it in
-                *      the cache. Otherwise, terminate it.
-                *
-                *      NOTE:  Only permanent objects are cached, and
-                *      permanent objects cannot have shadows.  This
-                *      affects the residence counting logic in a minor
-                *      way (can do it in-line, mostly).
+                *      Terminate this object. If it had a shadow,
+                *      then deallocate it; otherwise, if we need
+                *      to retry a cache trim, do so now; otherwise,
+                *      we are done. "pageout" objects have a shadow,
+                *      but maintain a "paging reference" rather than
+                *      a normal reference.
                 */
+               shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
 
-               if ((object->can_persist) && (object->alive)) {
-                       /*
-                        *      Now it is safe to decrement reference count,
-                        *      and to return if reference count is > 0.
-                        */
-
-                       vm_object_lock_assert_exclusive(object);
-                       if (--object->ref_count > 0) {
-                               vm_object_res_deallocate(object);
-                               vm_object_unlock(object);
-
-                               if (retry_cache_trim &&
-                                   ((object = vm_object_cache_trim(TRUE)) !=
-                                    VM_OBJECT_NULL)) {
-                                       continue;
-                               }
-                               return;
-                       }
-
-#if    MIGHT_NOT_CACHE_SHADOWS
-                       /*
-                        *      Remove shadow now if we don't
-                        *      want to cache shadows.
-                        */
-                       if (! cache_shadows) {
-                               shadow = object->shadow;
-                               object->shadow = VM_OBJECT_NULL;
-                       }
-#endif /* MIGHT_NOT_CACHE_SHADOWS */
-
-                       /*
-                        *      Enter the object onto the queue of
-                        *      cached objects, and deactivate
-                        *      all of its pages.
-                        */
-                       assert(object->shadow == VM_OBJECT_NULL);
-                       VM_OBJ_RES_DECR(object);
-                       XPR(XPR_VM_OBJECT,
-                     "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
-                               object,
-                               vm_object_cached_list.next,
-                               vm_object_cached_list.prev,0,0);
-
-
-                       vm_object_unlock(object);
-
-                       try_failed_count = 0;
-                       for (;;) {
-                               vm_object_cache_lock();
-
-                               /*
-                                * if we try to take a regular lock here
-                                * we risk deadlocking against someone
-                                * holding a lock on this object while
-                                * trying to vm_object_deallocate a different
-                                * object
-                                */
-                               if (vm_object_lock_try(object))
-                                       break;
-                               vm_object_cache_unlock();
-                               try_failed_count++;
-
-                               mutex_pause(try_failed_count);  /* wait a bit */
-                       }
-                       vm_object_cached_count++;
-                       if (vm_object_cached_count > vm_object_cached_high)
-                               vm_object_cached_high = vm_object_cached_count;
-                       queue_enter(&vm_object_cached_list, object,
-                               vm_object_t, cached_list);
-                       vm_object_cache_unlock();
-
-                       vm_object_deactivate_all_pages(object);
-                       vm_object_unlock(object);
-
-#if    MIGHT_NOT_CACHE_SHADOWS
-                       /*
-                        *      If we have a shadow that we need
-                        *      to deallocate, do so now, remembering
-                        *      to trim the cache later.
-                        */
-                       if (! cache_shadows && shadow != VM_OBJECT_NULL) {
-                               object = shadow;
-                               retry_cache_trim = TRUE;
-                               continue;
-                       }
-#endif /* MIGHT_NOT_CACHE_SHADOWS */
-
-                       /*
-                        *      Trim the cache. If the cache trim
-                        *      returns with a shadow for us to deallocate,
-                        *      then remember to retry the cache trim
-                        *      when we are done deallocating the shadow.
-                        *      Otherwise, we are done.
-                        */
-
-                       object = vm_object_cache_trim(TRUE);
-                       if (object == VM_OBJECT_NULL) {
-                               return;
-                       }
-                       retry_cache_trim = TRUE;
-               } else
-#endif /* VM_OBJECT_CACHE */
-               {
-                       /*
-                        *      This object is not cachable; terminate it.
-                        */
-                       XPR(XPR_VM_OBJECT,
-        "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
-                           object, object->resident_page_count,
-                           object->paging_in_progress,
-                           (void *)current_thread(),object->ref_count);
-
-                       VM_OBJ_RES_DECR(object);        /* XXX ? */
-                       /*
-                        *      Terminate this object. If it had a shadow,
-                        *      then deallocate it; otherwise, if we need
-                        *      to retry a cache trim, do so now; otherwise,
-                        *      we are done. "pageout" objects have a shadow,
-                        *      but maintain a "paging reference" rather than
-                        *      a normal reference.
-                        */
-                       shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
-
-                       if (vm_object_terminate(object) != KERN_SUCCESS) {
-                               return;
-                       }
-                       if (shadow != VM_OBJECT_NULL) {
-                               object = shadow;
-                               continue;
-                       }
-#if VM_OBJECT_CACHE
-                       if (retry_cache_trim &&
-                           ((object = vm_object_cache_trim(TRUE)) !=
-                            VM_OBJECT_NULL)) {
-                               continue;
-                       }
-#endif
+               if (vm_object_terminate(object) != KERN_SUCCESS) {
                        return;
                }
+               if (shadow != VM_OBJECT_NULL) {
+                       object = shadow;
+                       continue;
+               }
+               return;
        }
-#if VM_OBJECT_CACHE
-       assert(! retry_cache_trim);
-#endif
 }
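
With the cache branch gone, the remaining structure of vm_object_deallocate() is easier to see: dropping the last reference terminates the object, and the surrounding while loop then moves on to the shadow the terminated object was holding, so a long shadow chain is torn down iteratively in constant stack. A generic sketch of that iterate-don't-recurse shape (plain C, with free() standing in for vm_object_terminate()):

    /* Generic shape of the shadow-chain teardown; types illustrative. */
    #include <stdlib.h>

    struct node { struct node *shadow; };

    static void chain_release(struct node *object)
    {
        while (object != NULL) {
            struct node *shadow = object->shadow;  /* grab before destroying */
            free(object);                          /* "terminate" this level */
            object = shadow;                       /* continue down the chain */
        }
    }
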
 
 
 
 vm_page_t
 vm_object_page_grab(
-       vm_object_t     object)
+       vm_object_t     object)
 {
-       vm_page_t       p, next_p;
-       int             p_limit = 0;
-       int             p_skipped = 0;
+       vm_page_t       p, next_p;
+       int             p_limit = 0;
+       int             p_skipped = 0;
 
        vm_object_lock_assert_exclusive(object);
 
@@ -1273,46 +944,47 @@ vm_object_page_grab(
        p_limit = MIN(50, object->resident_page_count);
 
        while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
-
                p = next_p;
-               next_p = (vm_page_t)vm_page_queue_next(&next_p->listq);
+               next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
 
-               if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry || p->fictitious)
+               if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) {
                        goto move_page_in_obj;
+               }
 
-               if (p->pmapped || p->dirty || p->precious) {
+               if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
                        vm_page_lockspin_queues();
 
-                       if (p->pmapped) {
+                       if (p->vmp_pmapped) {
                                int refmod_state;
 
                                vm_object_page_grab_pmapped++;
 
-                               if (p->reference == FALSE || p->dirty == FALSE) {
-
+                               if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
                                        refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
 
-                                       if (refmod_state & VM_MEM_REFERENCED)
-                                               p->reference = TRUE;
+                                       if (refmod_state & VM_MEM_REFERENCED) {
+                                               p->vmp_reference = TRUE;
+                                       }
                                        if (refmod_state & VM_MEM_MODIFIED) {
                                                SET_PAGE_DIRTY(p, FALSE);
                                        }
                                }
-                               if (p->dirty == FALSE && p->precious == FALSE) {
-
+                               if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
                                        refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
 
-                                       if (refmod_state & VM_MEM_REFERENCED)
-                                               p->reference = TRUE;
+                                       if (refmod_state & VM_MEM_REFERENCED) {
+                                               p->vmp_reference = TRUE;
+                                       }
                                        if (refmod_state & VM_MEM_MODIFIED) {
                                                SET_PAGE_DIRTY(p, FALSE);
                                        }
 
-                                       if (p->dirty == FALSE)
+                                       if (p->vmp_dirty == FALSE) {
                                                goto take_page;
+                                       }
                                }
                        }
-                       if ((p->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q) && p->reference == TRUE) {
+                       if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
                                vm_page_activate(p);
 
                                VM_STAT_INCR(reactivations);
@@ -1320,8 +992,8 @@ vm_object_page_grab(
                        }
                        vm_page_unlock_queues();
 move_page_in_obj:
-                       vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
-                       vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
+                       vm_page_queue_remove(&object->memq, p, vmp_listq);
+                       vm_page_queue_enter(&object->memq, p, vmp_listq);
 
                        p_skipped++;
                        continue;
@@ -1335,66 +1007,68 @@ take_page:
                vm_page_unlock_queues();
 
                vm_page_free_prepare_object(p, TRUE);
-               
-               return (p);
+
+               return p;
        }
        vm_object_page_grab_skipped += p_skipped;
        vm_object_page_grab_failed++;
 
-       return (NULL);
+       return NULL;
 }
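
vm_object_page_grab() scans up to 50 resident pages looking for one it can free in place: wired, busy, cleaning, laundry, and fictitious pages are rotated to the tail of memq; pmapped pages have their hardware referenced/modified bits folded in through pmap_get_refmod() and pmap_disconnect(); and a page is taken only if it ends up neither dirty nor precious. A condensed restatement of the per-page decision, assuming hypothetical page flags and a stub for the disconnect step:

    /* Condensed per-page decision; struct page and the stub are hypothetical
     * stand-ins for vm_page_t and the pmap calls. */
    #include <stdbool.h>

    struct page {
        bool wired, busy, cleaning, laundry, fictitious;
        bool pmapped, dirty, precious;
    };

    static bool disconnect_left_page_clean(struct page *p)
    {
        /* stand-in for pmap_disconnect(): taking the page requires that it
         * come back neither dirty nor precious */
        return !p->dirty && !p->precious;
    }

    static bool can_grab(struct page *p)
    {
        if (p->wired || p->busy || p->cleaning || p->laundry || p->fictitious) {
            return false;               /* rotate to the queue tail, keep going */
        }
        if (!p->pmapped && !p->dirty && !p->precious) {
            return true;                /* already clean and unmapped */
        }
        return p->pmapped && disconnect_left_page_clean(p);
    }
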
 
 
 
-#define EVICT_PREPARE_LIMIT    64
-#define EVICT_AGE              10
+#define EVICT_PREPARE_LIMIT     64
+#define EVICT_AGE               10
 
-static clock_sec_t     vm_object_cache_aging_ts = 0;
+static  clock_sec_t     vm_object_cache_aging_ts = 0;
 
 static void
 vm_object_cache_remove_locked(
-       vm_object_t     object)
+       vm_object_t     object)
 {
        assert(object->purgable == VM_PURGABLE_DENY);
-       assert(object->wired_page_count == 0);
 
-       queue_remove(&vm_object_cached_list, object, vm_object_t, objq);
-       object->objq.next = NULL;
-       object->objq.prev = NULL;
+       queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
+       object->cached_list.next = NULL;
+       object->cached_list.prev = NULL;
 
        vm_object_cached_count--;
 }
 
 void
 vm_object_cache_remove(
-       vm_object_t     object)
+       vm_object_t     object)
 {
        vm_object_cache_lock_spin();
 
-       if (object->objq.next || object->objq.prev)
+       if (object->cached_list.next &&
+           object->cached_list.prev) {
                vm_object_cache_remove_locked(object);
+       }
 
        vm_object_cache_unlock();
 }
 
 void
 vm_object_cache_add(
-       vm_object_t     object)
+       vm_object_t     object)
 {
        clock_sec_t sec;
        clock_nsec_t nsec;
 
        assert(object->purgable == VM_PURGABLE_DENY);
-       assert(object->wired_page_count == 0);
 
-       if (object->resident_page_count == 0)
+       if (object->resident_page_count == 0) {
                return;
+       }
        clock_get_system_nanotime(&sec, &nsec);
 
        vm_object_cache_lock_spin();
 
-       if (object->objq.next == NULL && object->objq.prev == NULL) {
-               queue_enter(&vm_object_cached_list, object, vm_object_t, objq);
+       if (object->cached_list.next == NULL &&
+           object->cached_list.prev == NULL) {
+               queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
                object->vo_cache_ts = sec + EVICT_AGE;
                object->vo_cache_pages_to_scan = object->resident_page_count;
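
vm_object_cache_add() stamps the object with vo_cache_ts = now + EVICT_AGE (ten seconds), and vm_object_cache_evict() skips any object whose stamp is still in the future, so a cached object must sit unreferenced for the full aging period before its pages are reclaimed. The check in isolation:

    /* Aging check used by the cache add/evict pair. */
    #include <stdbool.h>
    #include <time.h>

    #define EVICT_AGE 10                /* seconds to age before eviction */

    static time_t cache_stamp(void)     /* at vm_object_cache_add() time */
    {
        return time(NULL) + EVICT_AGE;
    }

    static bool old_enough_to_evict(time_t vo_cache_ts)
    {
        return time(NULL) >= vo_cache_ts;   /* evict loop skips if still young */
    }
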
 
@@ -1406,33 +1080,33 @@ vm_object_cache_add(
 
 int
 vm_object_cache_evict(
-       int     num_to_evict,
-       int     max_objects_to_examine)
+       int     num_to_evict,
+       int     max_objects_to_examine)
 {
-       vm_object_t     object = VM_OBJECT_NULL;
-       vm_object_t     next_obj = VM_OBJECT_NULL;
-       vm_page_t       local_free_q = VM_PAGE_NULL;
-       vm_page_t       p;
-       vm_page_t       next_p;
-       int             object_cnt = 0;
-       vm_page_t       ep_array[EVICT_PREPARE_LIMIT];
-       int             ep_count;
-       int             ep_limit;
-       int             ep_index;
-       int             ep_freed = 0;
-       int             ep_moved = 0;
-       uint32_t        ep_skipped = 0;
-       clock_sec_t     sec;
-       clock_nsec_t    nsec;
+       vm_object_t     object = VM_OBJECT_NULL;
+       vm_object_t     next_obj = VM_OBJECT_NULL;
+       vm_page_t       local_free_q = VM_PAGE_NULL;
+       vm_page_t       p;
+       vm_page_t       next_p;
+       int             object_cnt = 0;
+       vm_page_t       ep_array[EVICT_PREPARE_LIMIT];
+       int             ep_count;
+       int             ep_limit;
+       int             ep_index;
+       int             ep_freed = 0;
+       int             ep_moved = 0;
+       uint32_t        ep_skipped = 0;
+       clock_sec_t     sec;
+       clock_nsec_t    nsec;
 
        KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
        /*
-        * do a couple of quick checks to see if it's 
+        * do a couple of quick checks to see if it's
         * worthwhile grabbing the lock
         */
        if (queue_empty(&vm_object_cached_list)) {
                KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
-               return (0);
+               return 0;
        }
        clock_get_system_nanotime(&sec, &nsec);
 
@@ -1442,10 +1116,10 @@ vm_object_cache_evict(
         */
        if (sec < vm_object_cache_aging_ts) {
                KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
-               return (0);
+               return 0;
        }
        /*
-        * don't need the queue lock to find 
+        * don't need the queue lock to find
         * and lock an object on the cached list
         */
        vm_page_unlock_queues();
@@ -1456,13 +1130,11 @@ vm_object_cache_evict(
                next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
 
                while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
-
                        object = next_obj;
-                       next_obj = (vm_object_t)queue_next(&next_obj->objq);
+                       next_obj = (vm_object_t)queue_next(&next_obj->cached_list);
 
                        assert(object->purgable == VM_PURGABLE_DENY);
-                       assert(object->wired_page_count == 0);
-                       
+
                        if (sec < object->vo_cache_ts) {
                                KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
 
@@ -1488,7 +1160,7 @@ vm_object_cache_evict(
                                 * the list, we'll never move past it.
                                 */
                                KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
-                               
+
                                vm_object_cache_remove_locked(object);
                                vm_object_unlock(object);
                                object = VM_OBJECT_NULL;
@@ -1502,8 +1174,9 @@ vm_object_cache_evict(
                }
                vm_object_cache_unlock();
 
-               if (object == VM_OBJECT_NULL)
+               if (object == VM_OBJECT_NULL) {
                        break;
+               }
 
                /*
                 * object is locked at this point and
@@ -1519,27 +1192,27 @@ vm_object_cache_evict(
                 * tenfold...  and we may have a 'run' of pages we can't utilize that
                 * needs to be skipped over...
                 */
-               if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT)
+               if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
                        ep_limit = EVICT_PREPARE_LIMIT;
+               }
                ep_count = 0;
 
                while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
-
                        p = next_p;
-                       next_p = (vm_page_t)vm_page_queue_next(&next_p->listq);
+                       next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
 
                        object->vo_cache_pages_to_scan--;
 
-                       if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry) {
-                               vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
-                               vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
+                       if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
+                               vm_page_queue_remove(&object->memq, p, vmp_listq);
+                               vm_page_queue_enter(&object->memq, p, vmp_listq);
 
                                ep_skipped++;
                                continue;
                        }
-                       if (p->wpmapped || p->dirty || p->precious) {
-                               vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
-                               vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
+                       if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
+                               vm_page_queue_remove(&object->memq, p, vmp_listq);
+                               vm_page_queue_enter(&object->memq, p, vmp_listq);
 
                                pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
                        }
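
Pages the evictor must skip are handled by rotating them to the tail of the object's memq with a remove/enter pair, so the forward scan never revisits them during this pass; the dirty/mapped class additionally gets its reference bit cleared so an untouched page can qualify on a later pass. The rotation in isolation:

    /* Sketch: move a page we cannot take to the tail of the scan queue. */
    vm_page_queue_remove(&object->memq, p, vmp_listq);
    vm_page_queue_enter(&object->memq, p, vmp_listq);   /* re-enter at tail */
    pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));     /* age it for next time */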
@@ -1550,12 +1223,11 @@ vm_object_cache_evict(
                vm_page_lockspin_queues();
 
                for (ep_index = 0; ep_index < ep_count; ep_index++) {
-
                        p = ep_array[ep_index];
 
-                       if (p->wpmapped || p->dirty || p->precious) {
-                               p->reference = FALSE;
-                               p->no_cache = FALSE;
+                       if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
+                               p->vmp_reference = FALSE;
+                               p->vmp_no_cache = FALSE;
 
                                /*
                                 * we've already filtered out pages that are in the laundry
@@ -1571,12 +1243,12 @@ vm_object_cache_evict(
 #endif
                                vm_page_free_prepare_queues(p);
 
-                               assert(p->pageq.next == 0 && p->pageq.prev == 0);
+                               assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
                                /*
                                 * Add this page to our list of reclaimed pages,
                                 * to be freed later.
                                 */
-                               p->snext = local_free_q;
+                               p->vmp_snext = local_free_q;
                                local_free_q = p;
 
                                ep_freed++;
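
Reclaimed pages are pushed onto local_free_q through their vmp_snext link and freed as one chain once the page-queues lock is dropped, bounding the lock hold time. The push/flush pattern on its own:

    /* Sketch: batch reclaimed pages on an intrusive local list. */
    vm_page_t local_free_q = VM_PAGE_NULL;

    /* per page, under vm_page_lockspin_queues(): */
    vm_page_free_prepare_queues(p);
    p->vmp_snext = local_free_q;                /* push onto the local chain */
    local_free_q = p;

    /* after dropping the lock: */
    vm_page_free_list(local_free_q, TRUE);      /* free the whole chain at once */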
@@ -1617,7 +1289,7 @@ vm_object_cache_evict(
        }
        /*
         * put the page queues lock back to the caller's
-        * idea of it 
+        * idea of it
         */
        vm_page_lock_queues();
 
@@ -1626,101 +1298,8 @@ vm_object_cache_evict(
        vm_object_cache_pages_skipped += ep_skipped;
 
        KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
-       return (ep_freed);
-}
-
-
-#if VM_OBJECT_CACHE
-/*
- *     Check to see whether we really need to trim
- *     down the cache. If so, remove an object from
- *     the cache, terminate it, and repeat.
- *
- *     Called with, and returns with, cache lock unlocked.
- */
-vm_object_t
-vm_object_cache_trim(
-       boolean_t called_from_vm_object_deallocate)
-{
-       vm_object_t object = VM_OBJECT_NULL;
-       vm_object_t shadow;
-
-       for (;;) {
-
-               /*
-                *      If we no longer need to trim the cache,
-                *      then we are done.
-                */
-               if (vm_object_cached_count <= vm_object_cached_max)
-                       return VM_OBJECT_NULL;
-
-               vm_object_cache_lock();
-               if (vm_object_cached_count <= vm_object_cached_max) {
-                       vm_object_cache_unlock();
-                       return VM_OBJECT_NULL;
-               }
-
-               /*
-                *      We must trim down the cache, so remove
-                *      the first object in the cache.
-                */
-               XPR(XPR_VM_OBJECT,
-               "vm_object_cache_trim: removing from front of cache (%x, %x)\n",
-                       vm_object_cached_list.next,
-                       vm_object_cached_list.prev, 0, 0, 0);
-
-               object = (vm_object_t) queue_first(&vm_object_cached_list);
-               if(object == (vm_object_t) &vm_object_cached_list) {
-                       /* something's wrong with the calling parameter or */
-                       /* the value of vm_object_cached_count, just fix   */
-                       /* and return */
-                       if(vm_object_cached_max < 0)
-                               vm_object_cached_max = 0;
-                       vm_object_cached_count = 0;
-                       vm_object_cache_unlock();
-                       return VM_OBJECT_NULL;
-               }
-               vm_object_lock(object);
-               queue_remove(&vm_object_cached_list, object, vm_object_t,
-                            cached_list);
-               vm_object_cached_count--;
-
-               vm_object_cache_unlock();
-               /*
-                *      Since this object is in the cache, we know
-                *      that it is initialized and has no references.
-                *      Take a reference to avoid recursive deallocations.
-                */
-
-               assert(object->pager_initialized);
-               assert(object->ref_count == 0);
-               vm_object_lock_assert_exclusive(object);
-               object->ref_count++;
-
-               /*
-                *      Terminate the object.
-                *      If the object had a shadow, we let vm_object_deallocate
-                *      deallocate it. "pageout" objects have a shadow, but
-                *      maintain a "paging reference" rather than a normal
-                *      reference.
-                *      (We are careful here to limit recursion.)
-                */
-               shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
-
-               if(vm_object_terminate(object) != KERN_SUCCESS)
-                       continue;
-
-               if (shadow != VM_OBJECT_NULL) {
-                       if (called_from_vm_object_deallocate) {
-                               return shadow;
-                       } else {
-                               vm_object_deallocate(shadow);
-                       }
-               }
-       }
+       return ep_freed;
 }
-#endif
-
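
The deleted vm_object_cache_trim() used the revive-then-terminate pattern the old VM_OBJECT_CACHE depended on: a cached object sits at ref_count == 0, so the trimmer takes one reference before terminating it to keep vm_object_deallocate() from recursing, then drops the shadow reference itself. Condensed from the removed code (the return-shadow-to-caller path is omitted):

    /* Sketch of the removed trim step. */
    vm_object_lock(object);
    assert(object->ref_count == 0);
    object->ref_count++;                /* block recursive deallocation */
    shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
    if (vm_object_terminate(object) == KERN_SUCCESS &&
        shadow != VM_OBJECT_NULL) {
            vm_object_deallocate(shadow);
    }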
 
 /*
  *     Routine:        vm_object_terminate
@@ -1741,16 +1320,13 @@ vm_object_cache_trim(
  */
 static kern_return_t
 vm_object_terminate(
-       vm_object_t     object)
+       vm_object_t     object)
 {
-       vm_object_t     shadow_object;
-
-       XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
-               object, object->ref_count, 0, 0, 0);
+       vm_object_t     shadow_object;
 
        vm_object_lock_assert_exclusive(object);
 
-       if (!object->pageout && (!object->temporary || object->can_persist) &&
+       if (!object->pageout && (!object->internal && object->can_persist) &&
            (object->pager != NULL || object->shadow_severed)) {
                /*
                 * Clear pager_trusted bit so that the pages get yanked
@@ -1792,16 +1368,12 @@ vm_object_terminate(
        object->terminating = TRUE;
        object->alive = FALSE;
 
-       if ( !object->internal && (object->objq.next || object->objq.prev))
+       if (!object->internal &&
+           object->cached_list.next &&
+           object->cached_list.prev) {
                vm_object_cache_remove(object);
-
-       if (object->hashed) {
-               lck_mtx_t       *lck;
-
-               lck = vm_object_hash_lock_spin(object->pager);
-               vm_object_remove(object);
-               vm_object_hash_unlock(lck);
        }
+
        /*
         *      Detach the object from its shadow if we are the shadow's
         *      copy. The reference we hold on the shadow must be dropped
@@ -1810,8 +1382,9 @@ vm_object_terminate(
        if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
            !(object->pageout)) {
                vm_object_lock(shadow_object);
-               if (shadow_object->copy == object)
+               if (shadow_object->copy == object) {
                        shadow_object->copy = VM_OBJECT_NULL;
+               }
                vm_object_unlock(shadow_object);
        }
 
@@ -1883,7 +1456,7 @@ void
 vm_object_reap(
        vm_object_t object)
 {
-       memory_object_t         pager;
+       memory_object_t         pager;
 
        vm_object_lock_assert_exclusive(object);
        assert(object->paging_in_progress == 0);
@@ -1897,32 +1470,45 @@ vm_object_reap(
         * from its pager, to properly account for compressed pages.
         */
        if (object->internal &&
-           object->purgable != VM_PURGABLE_DENY) {
-               vm_purgeable_accounting(object,
-                                       object->purgable,
-                                       TRUE); /* disown */
+           (object->purgable != VM_PURGABLE_DENY ||
+           object->vo_ledger_tag)) {
+               int ledger_flags;
+               kern_return_t kr;
+
+               ledger_flags = 0;
+               if (object->vo_no_footprint) {
+                       ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
+               }
+               assert(!object->alive);
+               assert(object->terminating);
+               kr = vm_object_ownership_change(object,
+                   object->vo_ledger_tag,   /* unchanged */
+                   NULL,                    /* no owner */
+                   ledger_flags,
+                   FALSE);                  /* task_objq not locked */
+               assert(kr == KERN_SUCCESS);
+               assert(object->vo_owner == NULL);
        }
 
        pager = object->pager;
        object->pager = MEMORY_OBJECT_NULL;
 
-       if (pager != MEMORY_OBJECT_NULL)
+       if (pager != MEMORY_OBJECT_NULL) {
                memory_object_control_disable(object->pager_control);
+       }
 
        object->ref_count--;
-#if    TASK_SWAPPER
+#if     TASK_SWAPPER
        assert(object->res_count == 0);
-#endif /* TASK_SWAPPER */
+#endif  /* TASK_SWAPPER */
 
-       assert (object->ref_count == 0);
+       assert(object->ref_count == 0);
 
        /*
         * remove from purgeable queue if it's on
         */
        if (object->internal) {
-               task_t owner;
-
-               owner = object->vo_purgeable_owner;
+               assert(VM_OBJECT_OWNER(object) == TASK_NULL);
 
                VM_OBJECT_UNWIRED(object);
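
Purgeable accounting at reap time is now a special case of ledger ownership: instead of the old vm_purgeable_accounting(object, object->purgable, TRUE /* disown */) call, the object is disowned through vm_object_ownership_change() with a NULL owner, which also covers objects that carry a vo_ledger_tag without being purgeable. The disown step restated compactly, using only the arguments visible in this hunk:

    /* Sketch: stop billing any task for a dying internal object. */
    int flags = object->vo_no_footprint ? VM_LEDGER_FLAG_NO_FOOTPRINT : 0;

    kr = vm_object_ownership_change(object,
        object->vo_ledger_tag,      /* tag unchanged */
        NULL,                       /* no owner from here on */
        flags,
        FALSE);                     /* task_objq not locked */
    assert(kr == KERN_SUCCESS && object->vo_owner == NULL);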
 
@@ -1931,8 +1517,6 @@ vm_object_reap(
                } else if (object->purgable == VM_PURGABLE_VOLATILE) {
                        purgeable_q_t queue;
 
-                       assert(object->vo_purgeable_owner == NULL);
-
                        queue = vm_purgeable_object_remove(object);
                        assert(queue);
 
@@ -1943,60 +1527,69 @@ vm_object_reap(
                                 */
                                vm_page_lock_queues();
                                vm_purgeable_token_delete_first(queue);
-        
-                               assert(queue->debug_count_objects>=0);
+
+                               assert(queue->debug_count_objects >= 0);
                                vm_page_unlock_queues();
                        }
 
                        /*
                         * Update "vm_page_purgeable_count" in bulk and mark
-                        * object as VM_PURGABLE_EMPTY to avoid updating 
+                        * object as VM_PURGABLE_EMPTY to avoid updating
                         * "vm_page_purgeable_count" again in vm_page_remove()
                         * when reaping the pages.
                         */
                        unsigned int delta;
                        assert(object->resident_page_count >=
-                              object->wired_page_count);
+                           object->wired_page_count);
                        delta = (object->resident_page_count -
-                                object->wired_page_count);
+                           object->wired_page_count);
                        if (delta != 0) {
                                assert(vm_page_purgeable_count >= delta);
                                OSAddAtomic(-delta,
-                                           (SInt32 *)&vm_page_purgeable_count);
+                                   (SInt32 *)&vm_page_purgeable_count);
                        }
                        if (object->wired_page_count != 0) {
                                assert(vm_page_purgeable_wired_count >=
-                                      object->wired_page_count);
+                                   object->wired_page_count);
                                OSAddAtomic(-object->wired_page_count,
-                                           (SInt32 *)&vm_page_purgeable_wired_count);
+                                   (SInt32 *)&vm_page_purgeable_wired_count);
                        }
                        object->purgable = VM_PURGABLE_EMPTY;
-               }
-               else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
-                        object->purgable == VM_PURGABLE_EMPTY) {
+               } else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
+                   object->purgable == VM_PURGABLE_EMPTY) {
                        /* remove from nonvolatile queue */
-                       assert(object->vo_purgeable_owner == TASK_NULL);
                        vm_purgeable_nonvolatile_dequeue(object);
                } else {
                        panic("object %p in unexpected purgeable state 0x%x\n",
-                             object, object->purgable);
+                           object, object->purgable);
+               }
+               if (object->transposed &&
+                   object->cached_list.next != NULL &&
+                   object->cached_list.prev == NULL) {
+                       /*
+                        * object->cached_list.next "points" to the
+                        * object that was transposed with this object.
+                        */
+               } else {
+                       assert(object->cached_list.next == NULL);
                }
-               assert(object->objq.next == NULL);
-               assert(object->objq.prev == NULL);
+               assert(object->cached_list.prev == NULL);
        }
-    
-       /*
-        *      Clean or free the pages, as appropriate.
-        *      It is possible for us to find busy/absent pages,
-        *      if some faults on this object were aborted.
-        */
+
        if (object->pageout) {
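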
+               /*
+                * free all remaining pages tabled on
+                * this object
+                * clean up its shadow
+                */
                assert(object->shadow != VM_OBJECT_NULL);
 
                vm_pageout_object_terminate(object);
-
-       } else if (((object->temporary && !object->can_persist) || (pager == MEMORY_OBJECT_NULL))) {
-
+       } else if (object->resident_page_count) {
+               /*
+                * free all remaining pages tabled on
+                * this object
+                */
                vm_object_reap_pages(object, REAP_REAP);
        }
        assert(vm_page_queue_empty(&object->memq));
@@ -2011,7 +1604,7 @@ vm_object_reap(
         */
        if (pager != MEMORY_OBJECT_NULL) {
                vm_object_unlock(object);
-               vm_object_release_pager(pager, object->hashed);
+               vm_object_release_pager(pager);
                vm_object_lock(object);
        }
 
@@ -2026,7 +1619,7 @@ vm_object_reap(
 #if VM_OBJECT_TRACKING
        if (vm_object_tracking_inited) {
                btlog_remove_entries_for_element(vm_object_tracking_btlog,
-                                                object);
+                   object);
        }
 #endif /* VM_OBJECT_TRACKING */
 
@@ -2043,39 +1636,39 @@ unsigned int vm_max_batch = 256;
 
 #define V_O_R_MAX_BATCH 128
 
-#define BATCH_LIMIT(max)       (vm_max_batch >= max ? max : vm_max_batch)
-
-
-#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)             \
-       MACRO_BEGIN                                                     \
-       if (_local_free_q) {                                            \
-               if (do_disconnect) {                                    \
-                       vm_page_t m;                                    \
-                       for (m = _local_free_q;                         \
-                            m != VM_PAGE_NULL;                         \
-                            m = m->snext) {            \
-                               if (m->pmapped) {                       \
-                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
-                               }                                       \
-                       }                                               \
-               }                                                       \
-               vm_page_free_list(_local_free_q, TRUE);                 \
-               _local_free_q = VM_PAGE_NULL;                           \
-       }                                                               \
+#define BATCH_LIMIT(max)        (vm_max_batch >= max ? max : vm_max_batch)
+
+
+#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)              \
+       MACRO_BEGIN                                                     \
+       if (_local_free_q) {                                            \
+               if (do_disconnect) {                                    \
+                       vm_page_t m;                                    \
+                       for (m = _local_free_q;                         \
+                            m != VM_PAGE_NULL;                         \
+                            m = m->vmp_snext) {                        \
+                               if (m->vmp_pmapped) {                   \
+                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
+                               }                                       \
+                       }                                               \
+               }                                                       \
+               vm_page_free_list(_local_free_q, TRUE);                 \
+               _local_free_q = VM_PAGE_NULL;                           \
+       }                                                               \
        MACRO_END
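
VM_OBJ_REAP_FREELIST() drains a vmp_snext chain built by the reap loop, and when do_disconnect is set it first pmap_disconnect()s every still-mapped page; that is needed for REAP_DATA_FLUSH, where pages may still be mapped by other pmaps when they are released. Typical call shape, as in vm_object_reap_pages() below:

    /* Sketch: flush the locally collected chain in one call. */
    boolean_t disconnect_on_release =
        (reap_type == REAP_DATA_FLUSH);     /* pages may still be mapped */

    /* ... loop pushes pages onto local_free_q via vmp_snext ... */

    VM_OBJ_REAP_FREELIST(local_free_q, disconnect_on_release);
    /* the macro frees the chain and resets local_free_q to VM_PAGE_NULL */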
 
 
 void
 vm_object_reap_pages(
-       vm_object_t     object,
-       int             reap_type)
+       vm_object_t     object,
+       int             reap_type)
 {
-       vm_page_t       p;
-       vm_page_t       next;
-       vm_page_t       local_free_q = VM_PAGE_NULL;
-       int             loop_count;
-       boolean_t       disconnect_on_release;
-       pmap_flush_context      pmap_flush_context_storage;
+       vm_page_t       p;
+       vm_page_t       next;
+       vm_page_t       local_free_q = VM_PAGE_NULL;
+       int             loop_count;
+       boolean_t       disconnect_on_release;
+       pmap_flush_context      pmap_flush_context_storage;
 
        if (reap_type == REAP_DATA_FLUSH) {
                /*
@@ -2093,30 +1686,29 @@ vm_object_reap_pages(
                 */
                disconnect_on_release = FALSE;
        }
-               
+
 restart_after_sleep:
-       if (vm_page_queue_empty(&object->memq))
+       if (vm_page_queue_empty(&object->memq)) {
                return;
+       }
        loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
 
-       if (reap_type == REAP_PURGEABLE)
+       if (reap_type == REAP_PURGEABLE) {
                pmap_flush_context_init(&pmap_flush_context_storage);
+       }
 
        vm_page_lockspin_queues();
 
        next = (vm_page_t)vm_page_queue_first(&object->memq);
 
        while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
-
                p = next;
-               next = (vm_page_t)vm_page_queue_next(&next->listq);
+               next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
 
                if (--loop_count == 0) {
-                                       
                        vm_page_unlock_queues();
 
                        if (local_free_q) {
-
                                if (reap_type == REAP_PURGEABLE) {
                                        pmap_flush(&pmap_flush_context_storage);
                                        pmap_flush_context_init(&pmap_flush_context_storage);
@@ -2127,34 +1719,33 @@ restart_after_sleep:
                                 * hogging the page queue lock too long
                                 */
                                VM_OBJ_REAP_FREELIST(local_free_q,
-                                                    disconnect_on_release);
-                       } else
+                                   disconnect_on_release);
+                       } else {
                                mutex_pause(0);
+                       }
 
                        loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
 
                        vm_page_lockspin_queues();
                }
                if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
-
-                       if (p->busy || p->cleaning) {
-
+                       if (p->vmp_busy || p->vmp_cleaning) {
                                vm_page_unlock_queues();
                                /*
                                 * free the pages reclaimed so far
                                 */
                                VM_OBJ_REAP_FREELIST(local_free_q,
-                                                    disconnect_on_release);
+                                   disconnect_on_release);
 
                                PAGE_SLEEP(object, p, THREAD_UNINT);
 
                                goto restart_after_sleep;
                        }
-                       if (p->laundry)
+                       if (p->vmp_laundry) {
                                vm_pageout_steal_laundry(p, TRUE);
+                       }
                }
                switch (reap_type) {
-
                case REAP_DATA_FLUSH:
                        if (VM_PAGE_WIRED(p)) {
                                /*
@@ -2166,7 +1757,7 @@ restart_after_sleep:
                                continue;
                        }
                        break;
-                       
+
                case REAP_PURGEABLE:
                        if (VM_PAGE_WIRED(p)) {
                                /*
@@ -2175,10 +1766,11 @@ restart_after_sleep:
                                vm_page_purged_wired++;
                                continue;
                        }
-                       if (p->laundry && !p->busy && !p->cleaning)
+                       if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
                                vm_pageout_steal_laundry(p, TRUE);
+                       }
 
-                       if (p->cleaning || p->laundry || p->absent) {
+                       if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
                                /*
                                 * page is being acted upon,
                                 * so don't mess with it
@@ -2186,15 +1778,16 @@ restart_after_sleep:
                                vm_page_purged_others++;
                                continue;
                        }
-                       if (p->busy) {
+                       if (p->vmp_busy) {
                                /*
                                 * We can't reclaim a busy page, but since it
                                 * isn't wired we can deactivate it to make
                                 * sure it gets considered by
                                 * vm_pageout_scan() later.
                                 */
-                               if (VM_PAGE_PAGEABLE(p))
+                               if (VM_PAGE_PAGEABLE(p)) {
                                        vm_page_deactivate(p);
+                               }
                                vm_page_purged_busy++;
                                continue;
                        }
@@ -2204,7 +1797,7 @@ restart_after_sleep:
                        /*
                         * we can discard this page...
                         */
-                       if (p->pmapped == TRUE) {
+                       if (p->vmp_pmapped == TRUE) {
                                /*
                                 * unmap the page
                                 */
@@ -2215,7 +1808,7 @@ restart_after_sleep:
                        break;
 
                case REAP_TERMINATE:
-                       if (p->absent || p->private) {
+                       if (p->vmp_absent || p->vmp_private) {
                                /*
                                 *      For private pages, VM_PAGE_FREE just
                                 *      leaves the page structure around for
@@ -2225,33 +1818,33 @@ restart_after_sleep:
                                 */
                                break;
                        }
-                       if (p->fictitious) {
-                               assert (VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
+                       if (p->vmp_fictitious) {
+                               assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
                                break;
                        }
-                       if (!p->dirty && p->wpmapped)
-                               p->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
-
-                       if ((p->dirty || p->precious) && !p->error && object->alive) {
+                       if (!p->vmp_dirty && p->vmp_wpmapped) {
+                               p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
+                       }
 
+                       if ((p->vmp_dirty || p->vmp_precious) && !p->vmp_error && object->alive) {
                                assert(!object->internal);
-                               
-                               p->free_when_done = TRUE;
 
-                               if (!p->laundry) {
+                               p->vmp_free_when_done = TRUE;
+
+                               if (!p->vmp_laundry) {
                                        vm_page_queues_remove(p, TRUE);
                                        /*
                                         * flush page... page will be freed
                                         * upon completion of I/O
                                         */
-                                       (void)vm_pageout_cluster(p, FALSE, FALSE);
+                                       vm_pageout_cluster(p);
                                }
                                vm_page_unlock_queues();
                                /*
                                 * free the pages reclaimed so far
                                 */
                                VM_OBJ_REAP_FREELIST(local_free_q,
-                                                    disconnect_on_release);
+                                   disconnect_on_release);
 
                                vm_object_paging_wait(object, THREAD_UNINT);
 
@@ -2263,12 +1856,12 @@ restart_after_sleep:
                        break;
                }
                vm_page_free_prepare_queues(p);
-               assert(p->pageq.next == 0 && p->pageq.prev == 0);
+               assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
                /*
                 * Add this page to our list of reclaimed pages,
                 * to be freed later.
                 */
-               p->snext = local_free_q;
+               p->vmp_snext = local_free_q;
                local_free_q = p;
        }
        vm_page_unlock_queues();
@@ -2276,17 +1869,18 @@ restart_after_sleep:
        /*
         * Free the remaining reclaimed pages
         */
-       if (reap_type == REAP_PURGEABLE)
+       if (reap_type == REAP_PURGEABLE) {
                pmap_flush(&pmap_flush_context_storage);
+       }
 
        VM_OBJ_REAP_FREELIST(local_free_q,
-                            disconnect_on_release);
+           disconnect_on_release);
 }
 
 
 void
 vm_object_reap_async(
-       vm_object_t     object)
+       vm_object_t     object)
 {
        vm_object_lock_assert_exclusive(object);
 
@@ -2296,7 +1890,7 @@ vm_object_reap_async(
 
        /* enqueue the VM object... */
        queue_enter(&vm_object_reaper_queue, object,
-                   vm_object_t, cached_list);
+           vm_object_t, cached_list);
 
        vm_object_reaper_unlock();
 
@@ -2308,22 +1902,22 @@ vm_object_reap_async(
 void
 vm_object_reaper_thread(void)
 {
-       vm_object_t     object, shadow_object;
+       vm_object_t     object, shadow_object;
 
        vm_object_reaper_lock_spin();
 
        while (!queue_empty(&vm_object_reaper_queue)) {
                queue_remove_first(&vm_object_reaper_queue,
-                                  object,
-                                  vm_object_t,
-                                  cached_list);
+                   object,
+                   vm_object_t,
+                   cached_list);
 
                vm_object_reaper_unlock();
                vm_object_lock(object);
 
                assert(object->terminating);
                assert(!object->alive);
-               
+
                /*
                 * The pageout daemon might be playing with our pages.
                 * Now that the object is dead, it won't touch any more
@@ -2333,15 +1927,15 @@ vm_object_reaper_thread(void)
                 * itself.
                 */
                while (object->paging_in_progress != 0 ||
-                       object->activity_in_progress != 0) {
+                   object->activity_in_progress != 0) {
                        vm_object_wait(object,
-                                      VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
-                                      THREAD_UNINT);
+                           VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+                           THREAD_UNINT);
                        vm_object_lock(object);
                }
 
                shadow_object =
-                       object->pageout ? VM_OBJECT_NULL : object->shadow;
+                   object->pageout ? VM_OBJECT_NULL : object->shadow;
 
                vm_object_reap(object);
                /* cache is unlocked and object is no longer valid */
@@ -2367,64 +1961,21 @@ vm_object_reaper_thread(void)
        /*NOTREACHED*/
 }
 
-/*
- *     Routine:        vm_object_pager_wakeup
- *     Purpose:        Wake up anyone waiting for termination of a pager.
- */
-
-static void
-vm_object_pager_wakeup(
-       memory_object_t pager)
-{
-       vm_object_hash_entry_t  entry;
-       boolean_t               waiting = FALSE;
-       lck_mtx_t               *lck;
-
-       /*
-        *      If anyone was waiting for the memory_object_terminate
-        *      to be queued, wake them up now.
-        */
-       lck = vm_object_hash_lock_spin(pager);
-       entry = vm_object_hash_lookup(pager, TRUE);
-       if (entry != VM_OBJECT_HASH_ENTRY_NULL)
-               waiting = entry->waiting;
-       vm_object_hash_unlock(lck);
-
-       if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
-               if (waiting)
-                       thread_wakeup((event_t) pager);
-               vm_object_hash_entry_free(entry);
-       }
-}
-
 /*
  *     Routine:        vm_object_release_pager
  *     Purpose:        Terminate the pager and, upon completion,
  *                     release our last reference to it.
- *                     just like memory_object_terminate, except
- *                     that we wake up anyone blocked in vm_object_enter
- *                     waiting for termination message to be queued
- *                     before calling memory_object_init.
  */
 static void
 vm_object_release_pager(
-       memory_object_t pager,
-       boolean_t       hashed)
+       memory_object_t pager)
 {
-
        /*
         *      Terminate the pager.
         */
 
        (void) memory_object_terminate(pager);
 
-       if (hashed == TRUE) {
-               /*
-                *      Wakeup anyone waiting for this terminate
-                *      and remove the entry from the hash
-                */
-               vm_object_pager_wakeup(pager);
-       }
        /*
         *      Release reference to pager.
         */
@@ -2440,13 +1991,14 @@ vm_object_release_pager(
  */
 kern_return_t
 vm_object_destroy(
-       vm_object_t             object,
-       __unused kern_return_t          reason)
+       vm_object_t             object,
+       __unused kern_return_t          reason)
 {
-       memory_object_t         old_pager;
+       memory_object_t         old_pager;
 
-       if (object == VM_OBJECT_NULL)
-               return(KERN_SUCCESS);
+       if (object == VM_OBJECT_NULL) {
+               return KERN_SUCCESS;
+       }
 
        /*
         *      Remove the pager association immediately.
@@ -2462,19 +2014,11 @@ vm_object_destroy(
        object->named = FALSE;
        object->alive = FALSE;
 
-       if (object->hashed) {
-               lck_mtx_t       *lck;
-               /*
-                *      Rip out the pager from the vm_object now...
-                */
-               lck = vm_object_hash_lock_spin(object->pager);
-               vm_object_remove(object);
-               vm_object_hash_unlock(lck);
-       }
        old_pager = object->pager;
        object->pager = MEMORY_OBJECT_NULL;
-       if (old_pager != MEMORY_OBJECT_NULL)
+       if (old_pager != MEMORY_OBJECT_NULL) {
                memory_object_control_disable(object->pager_control);
+       }
 
        /*
         * Wait for the existing paging activity (that got
@@ -2488,9 +2032,9 @@ vm_object_destroy(
         *      Terminate the object now.
         */
        if (old_pager != MEMORY_OBJECT_NULL) {
-               vm_object_release_pager(old_pager, object->hashed);
+               vm_object_release_pager(old_pager);
 
-               /* 
+               /*
                 * JMM - Release the caller's reference.  This assumes the
                 * caller had a reference to release, which is a big (but
                 * currently valid) assumption if this is driven from the
@@ -2498,75 +2042,9 @@ vm_object_destroy(
                 * this call)..
                 */
                vm_object_deallocate(object);
-
        }
-       return(KERN_SUCCESS);
-}
-
-
-#if VM_OBJECT_CACHE
-
-#define VM_OBJ_DEACT_ALL_STATS DEBUG
-#if VM_OBJ_DEACT_ALL_STATS
-uint32_t vm_object_deactivate_all_pages_batches = 0;
-uint32_t vm_object_deactivate_all_pages_pages = 0;
-#endif /* VM_OBJ_DEACT_ALL_STATS */
-/*
- *     vm_object_deactivate_all_pages
- *
- *     Deactivate all pages in the specified object.  (Keep its pages
- *     in memory even though it is no longer referenced.)
- *
- *     The object must be locked.
- */
-static void
-vm_object_deactivate_all_pages(
-       vm_object_t     object)
-{
-       vm_page_t               p;
-       int                     loop_count;
-#if VM_OBJ_DEACT_ALL_STATS
-       int                     pages_count;
-#endif /* VM_OBJ_DEACT_ALL_STATS */
-#define V_O_D_A_P_MAX_BATCH    256
-
-       loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
-#if VM_OBJ_DEACT_ALL_STATS
-       pages_count = 0;
-#endif /* VM_OBJ_DEACT_ALL_STATS */
-       vm_page_lock_queues();
-       vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
-               if (--loop_count == 0) {
-#if VM_OBJ_DEACT_ALL_STATS
-                       hw_atomic_add(&vm_object_deactivate_all_pages_batches,
-                                     1);
-                       hw_atomic_add(&vm_object_deactivate_all_pages_pages,
-                                     pages_count);
-                       pages_count = 0;
-#endif /* VM_OBJ_DEACT_ALL_STATS */
-                       lck_mtx_yield(&vm_page_queue_lock);
-                       loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
-               }
-               if (!p->busy && (p->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q)) {
-#if VM_OBJ_DEACT_ALL_STATS
-                       pages_count++;
-#endif /* VM_OBJ_DEACT_ALL_STATS */
-                       vm_page_deactivate(p);
-               }
-       }
-#if VM_OBJ_DEACT_ALL_STATS
-       if (pages_count) {
-               hw_atomic_add(&vm_object_deactivate_all_pages_batches, 1);
-               hw_atomic_add(&vm_object_deactivate_all_pages_pages,
-                             pages_count);
-               pages_count = 0;
-       }
-#endif /* VM_OBJ_DEACT_ALL_STATS */
-       vm_page_unlock_queues();
+       return KERN_SUCCESS;
 }
-#endif /* VM_OBJECT_CACHE */
-
-
 
 /*
  * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
@@ -2589,31 +2067,31 @@ vm_object_deactivate_all_pages(
  * out with all the bits set.  The macros below hide all these details from the caller.
  */
 
-#define PAGES_IN_A_CHUNK       64      /* The number of pages in the chunk must */
-                                       /* be the same as the number of bits in  */
-                                       /* the chunk_state_t type. We use 64     */
-                                       /* just for convenience.                 */
+#define PAGES_IN_A_CHUNK        64      /* The number of pages in the chunk must */
+                                        /* be the same as the number of bits in  */
+                                        /* the chunk_state_t type. We use 64     */
+                                        /* just for convenience.                */
 
-#define CHUNK_SIZE     (PAGES_IN_A_CHUNK * PAGE_SIZE_64)       /* Size of a chunk in bytes */
+#define CHUNK_SIZE      (PAGES_IN_A_CHUNK * PAGE_SIZE_64)       /* Size of a chunk in bytes */
 
-typedef uint64_t       chunk_state_t;
+typedef uint64_t        chunk_state_t;
 
 /*
  * The bit map uses negative logic, so we start out with all 64 bits set to indicate
  * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
  * then we mark pages beyond the len as having been "processed" so that we don't waste time
- * looking at pages in that range.  This can save us from unnecessarily chasing down the 
+ * looking at pages in that range.  This can save us from unnecessarily chasing down the
  * shadow chain.
  */
 
-#define CHUNK_INIT(c, len)                                             \
-       MACRO_BEGIN                                                     \
-       uint64_t p;                                                     \
-                                                                       \
-       (c) = 0xffffffffffffffffLL;                                     \
-                                                                       \
-       for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)       \
-               MARK_PAGE_HANDLED(c, p);                                \
+#define CHUNK_INIT(c, len)                                              \
+       MACRO_BEGIN                                                     \
+       uint64_t p;                                                     \
+                                                                        \
+       (c) = 0xffffffffffffffffLL;                                     \
+                                                                        \
+       for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)       \
+               MARK_PAGE_HANDLED(c, p);                                \
        MACRO_END
 
 
@@ -2621,14 +2099,14 @@ typedef uint64_t        chunk_state_t;
  * Return true if all pages in the chunk have not yet been processed.
  */
 
-#define CHUNK_NOT_COMPLETE(c)  ((c) != 0)
+#define CHUNK_NOT_COMPLETE(c)   ((c) != 0)
 
 /*
  * Return true if the page at offset 'p' in the bit map has already been handled
  * while processing a higher level object in the shadow chain.
  */
 
-#define PAGE_ALREADY_HANDLED(c, p)     (((c) & (1LL << (p))) == 0)
+#define PAGE_ALREADY_HANDLED(c, p)      (((c) & (1ULL << (p))) == 0)
 
 /*
  * Mark the page at offset 'p' in the bit map as having been processed.
@@ -2636,7 +2114,7 @@ typedef uint64_t  chunk_state_t;
 
 #define MARK_PAGE_HANDLED(c, p) \
 MACRO_BEGIN \
-       (c) = (c) & ~(1LL << (p)); \
+       (c) = (c) & ~(1ULL << (p)); \
 MACRO_END
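
A worked example of the negative-logic bitmap, assuming 4 KB pages: initializing a chunk that has only 16 pages left marks bits 16..63 as handled up front, so the scan only ever visits the low 16 offsets.

    chunk_state_t c;

    CHUNK_INIT(c, 16 * PAGE_SIZE_64);        /* short final chunk */
    /* c == 0x000000000000ffff: bits 0..15 set, i.e. still unprocessed */
    assert(!PAGE_ALREADY_HANDLED(c, 3));     /* page 3 needs a look */
    assert(PAGE_ALREADY_HANDLED(c, 40));     /* page 40 is past the range */
    assert(CHUNK_NOT_COMPLETE(c));           /* work remains */
    MARK_PAGE_HANDLED(c, 3);                 /* clears bit 3 once done */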
 
 
@@ -2647,15 +2125,14 @@ MACRO_END
 
 static boolean_t
 page_is_paged_out(
-       vm_object_t             object,
-       vm_object_offset_t      offset)
+       vm_object_t             object,
+       vm_object_offset_t      offset)
 {
        if (object->internal &&
-          object->alive &&
-          !object->terminating &&
-          object->pager_ready) {
-
-               if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) 
+           object->alive &&
+           !object->terminating &&
+           object->pager_ready) {
+               if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
                    == VM_EXTERNAL_STATE_EXISTS) {
                        return TRUE;
                }
@@ -2687,24 +2164,24 @@ int madvise_free_debug = 0;
 
 static void
 deactivate_pages_in_object(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       vm_object_size_t        size,
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size,
        boolean_t               kill_page,
-       boolean_t               reusable_page,
-       boolean_t               all_reusable,
-       chunk_state_t           *chunk_state,
+       boolean_t               reusable_page,
+       boolean_t               all_reusable,
+       chunk_state_t           *chunk_state,
        pmap_flush_context      *pfc,
-       struct pmap             *pmap,
-       vm_map_offset_t         pmap_offset)
+       struct pmap             *pmap,
+       vm_map_offset_t         pmap_offset)
 {
-       vm_page_t       m;
-       int             p;
-       struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
-       struct vm_page_delayed_work     *dwp;
-       int             dw_count;
-       int             dw_limit;
-       unsigned int    reusable = 0;
+       vm_page_t       m;
+       int             p;
+       struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+       struct vm_page_delayed_work     *dwp;
+       int             dw_count;
+       int             dw_limit;
+       unsigned int    reusable = 0;
 
        /*
         * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
@@ -2717,23 +2194,22 @@ deactivate_pages_in_object(
        dw_count = 0;
        dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 
-       for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
-
+       for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
                /*
                 * If this offset has already been found and handled in a higher level object, then don't
                 * do anything with it in the current shadow object.
                 */
 
-               if (PAGE_ALREADY_HANDLED(*chunk_state, p))
+               if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
                        continue;
-       
+               }
+
                /*
                 * See if the page at this offset is around.  First check to see if the page is resident,
                 * then if not, check the existence map or with the pager.
                 */
 
-               if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
-
+               if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
                        /*
                         * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
                         * so that we won't bother looking for a page at this offset again if there are more
@@ -2741,11 +2217,12 @@ deactivate_pages_in_object(
                         */
 
                        MARK_PAGE_HANDLED(*chunk_state, p);
-       
-                       if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) {
-                               int     clear_refmod;
-                               int     pmap_options;
-       
+
+                       if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
+                           (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
+                               int     clear_refmod;
+                               int     pmap_options;
+
                                dwp->dw_mask = 0;
 
                                pmap_options = 0;
@@ -2761,11 +2238,11 @@ deactivate_pages_in_object(
                                                 */
                                                pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
                                        }
-                                       m->precious = FALSE;
-                                       m->dirty = FALSE;
+                                       m->vmp_precious = FALSE;
+                                       m->vmp_dirty = FALSE;
 
                                        clear_refmod |= VM_MEM_MODIFIED;
-                                       if (m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) {
+                                       if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
                                                /*
                                                 * This page is now clean and
                                                 * reclaimable.  Move it out
@@ -2778,10 +2255,10 @@ deactivate_pages_in_object(
 
                                        VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
 
-                                       if (reusable_page && !m->reusable) {
+                                       if (reusable_page && !m->vmp_reusable) {
                                                assert(!all_reusable);
                                                assert(!object->all_reusable);
-                                               m->reusable = TRUE;
+                                               m->vmp_reusable = TRUE;
                                                object->reusable_page_count++;
                                                assert(object->resident_page_count >= object->reusable_page_count);
                                                reusable++;
@@ -2790,26 +2267,28 @@ deactivate_pages_in_object(
                                                 * "reusable" (to update pmap
                                                 * stats for all mappings).
                                                 */
-                                               pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
+                                               pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
                                        }
                                }
                                pmap_options |= PMAP_OPTIONS_NOFLUSH;
                                pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
-                                                         clear_refmod,
-                                                         pmap_options,
-                                                         (void *)pfc);
+                                   clear_refmod,
+                                   pmap_options,
+                                   (void *)pfc);
 
-                               if ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable))
+                               if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable)) {
                                        dwp->dw_mask |= DW_move_page;
-                               
-                               if (dwp->dw_mask)
+                               }
+
+                               if (dwp->dw_mask) {
                                        VM_PAGE_ADD_DELAYED_WORK(dwp, m,
-                                                                dw_count);
+                                           dw_count);
+                               }
 
                                if (dw_count >= dw_limit) {
                                        if (reusable) {
                                                OSAddAtomic(reusable,
-                                                           &vm_page_stats_reusable.reusable_count);
+                                                   &vm_page_stats_reusable.reusable_count);
                                                vm_page_stats_reusable.reusable += reusable;
                                                reusable = 0;
                                        }
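
The reusable tally follows the same batching discipline as the delayed-work array: per-page bookkeeping bumps a local counter, and only a batch flush (or the function epilogue) touches the global vm_page_stats_reusable with an atomic. In isolation:

    /* Sketch: amortize atomic updates of the global reusable stats. */
    unsigned int reusable = 0;

    /* per page marked reusable: */
    m->vmp_reusable = TRUE;
    object->reusable_page_count++;
    reusable++;                         /* cheap local tally */

    /* on batch flush, and once more before returning: */
    if (reusable) {
            OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
            vm_page_stats_reusable.reusable += reusable;
            reusable = 0;
    }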
@@ -2819,9 +2298,7 @@ deactivate_pages_in_object(
                                        dw_count = 0;
                                }
                        }
-
                } else {
-
                        /*
                         * The page at this offset isn't memory resident, check to see if it's
                         * been paged out.  If so, mark it as handled so we don't bother looking
@@ -2832,12 +2309,11 @@ deactivate_pages_in_object(
                                MARK_PAGE_HANDLED(*chunk_state, p);
 
                                /*
-                                * If we're killing a non-resident page, then clear the page in the existence 
+                                * If we're killing a non-resident page, then clear the page in the existence
                                 * map so we don't bother paging it back in if it's touched again in the future.
                                 */
 
                                if ((kill_page) && (object->internal)) {
-
                                        VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
 
                                        if (pmap != PMAP_NULL) {
@@ -2852,7 +2328,7 @@ deactivate_pages_in_object(
                                                        pmap,
                                                        pmap_offset,
                                                        (pmap_offset +
-                                                        PAGE_SIZE),
+                                                       PAGE_SIZE),
                                                        PMAP_OPTIONS_REMOVE);
                                        }
                                }
@@ -2862,12 +2338,13 @@ deactivate_pages_in_object(
 
        if (reusable) {
                OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
-               vm_page_stats_reusable.reusable += reusable;    
+               vm_page_stats_reusable.reusable += reusable;
                reusable = 0;
        }
-               
-       if (dw_count)
+
+       if (dw_count) {
                vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+       }
 }
 
 
@@ -2882,20 +2359,20 @@ deactivate_pages_in_object(
 
 static vm_object_size_t
 deactivate_a_chunk(
-       vm_object_t             orig_object,
-       vm_object_offset_t      offset,
-       vm_object_size_t        size,
+       vm_object_t             orig_object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size,
        boolean_t               kill_page,
-       boolean_t               reusable_page,
-       boolean_t               all_reusable,
+       boolean_t               reusable_page,
+       boolean_t               all_reusable,
        pmap_flush_context      *pfc,
-       struct pmap             *pmap,
-       vm_map_offset_t         pmap_offset)
+       struct pmap             *pmap,
+       vm_map_offset_t         pmap_offset)
 {
-       vm_object_t             object;
-       vm_object_t             tmp_object;
-       vm_object_size_t        length;
-       chunk_state_t           chunk_state;
+       vm_object_t             object;
+       vm_object_t             tmp_object;
+       vm_object_size_t        length;
+       chunk_state_t           chunk_state;
 
 
        /*
@@ -2939,18 +2416,20 @@ deactivate_a_chunk(
                        kill_page = FALSE;
                        reusable_page = FALSE;
                        all_reusable = FALSE;
-                       offset += object->vo_shadow_offset;
-                       vm_object_lock(tmp_object);
+                       offset += object->vo_shadow_offset;
+                       vm_object_lock(tmp_object);
                }
 
-               if (object != orig_object)
-                       vm_object_unlock(object);
+               if (object != orig_object) {
+                       vm_object_unlock(object);
+               }
 
                object = tmp_object;
        }
 
-       if (object && object != orig_object)
-               vm_object_unlock(object);
+       if (object && object != orig_object) {
+               vm_object_unlock(object);
+       }
 
        return length;
 }
@@ -2965,21 +2444,21 @@ deactivate_a_chunk(
 
 __private_extern__ void
 vm_object_deactivate_pages(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       vm_object_size_t        size,
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size,
        boolean_t               kill_page,
-       boolean_t               reusable_page,
-       struct pmap             *pmap,
-       vm_map_offset_t         pmap_offset)
+       boolean_t               reusable_page,
+       struct pmap             *pmap,
+       vm_map_offset_t         pmap_offset)
 {
-       vm_object_size_t        length;
-       boolean_t               all_reusable;
-       pmap_flush_context      pmap_flush_context_storage;
+       vm_object_size_t        length;
+       boolean_t               all_reusable;
+       pmap_flush_context      pmap_flush_context_storage;
 
        /*
         * We break the range up into chunks and do one chunk at a time.  This is for
-        * efficiency and performance while handling the shadow chains and the locks.   
+        * efficiency and performance while handling the shadow chains and the locks.
         * The deactivate_a_chunk() function returns how much of the range it processed.
         * We keep calling this routine until the given size is exhausted.
         */
@@ -2988,7 +2467,7 @@ vm_object_deactivate_pages(
        all_reusable = FALSE;
 #if 11
        /*
-        * For the sake of accurate "reusable" pmap stats, we need 
+        * For the sake of accurate "reusable" pmap stats, we need
         * to tell pmap about each page that is no longer "reusable",
         * so we can't do the "all_reusable" optimization.
         */
@@ -3004,7 +2483,7 @@ vm_object_deactivate_pages(
 #endif
 
        if ((reusable_page || all_reusable) && object->all_reusable) {
-               /* This means MADV_FREE_REUSABLE has been called twice, which 
+               /* This means MADV_FREE_REUSABLE has been called twice, which
                 * is probably illegal. */
                return;
        }
@@ -3029,7 +2508,7 @@ vm_object_deactivate_pages(
                        /* update global stats */
                        reusable = object->resident_page_count;
                        OSAddAtomic(reusable,
-                                   &vm_page_stats_reusable.reusable_count);
+                           &vm_page_stats_reusable.reusable_count);
                        vm_page_stats_reusable.reusable += reusable;
                        vm_page_stats_reusable.all_reusable_calls++;
                }
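
The comment at the top of vm_object_deactivate_pages() describes the driver loop this hunk belongs to: keep calling deactivate_a_chunk() and advance by however much it actually handled. A stand-alone sketch of that loop, with `deactivate_chunk()` as a hypothetical stand-in and an assumed chunk granularity:

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_SIZE ((uint64_t)64 * 4096)   /* assumed chunk granularity */

    static uint64_t
    deactivate_chunk(uint64_t offset, uint64_t size)
    {
            /* handle at most the remainder of the current chunk */
            uint64_t length = CHUNK_SIZE - (offset % CHUNK_SIZE);
            return (length < size) ? length : size;
    }

    int
    main(void)
    {
            uint64_t offset = 12288;           /* deliberately unaligned */
            uint64_t size   = 10 * CHUNK_SIZE;

            while (size) {
                    uint64_t length = deactivate_chunk(offset, size);
                    size   -= length;
                    offset += length;
                    printf("processed %llu bytes, %llu left\n",
                        (unsigned long long)length, (unsigned long long)size);
            }
            return 0;
    }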
@@ -3040,37 +2519,37 @@ vm_object_deactivate_pages(
 
 void
 vm_object_reuse_pages(
-       vm_object_t             object,
-       vm_object_offset_t      start_offset,
-       vm_object_offset_t      end_offset,
-       boolean_t               allow_partial_reuse)
+       vm_object_t             object,
+       vm_object_offset_t      start_offset,
+       vm_object_offset_t      end_offset,
+       boolean_t               allow_partial_reuse)
 {
-       vm_object_offset_t      cur_offset;
-       vm_page_t               m;
-       unsigned int            reused, reusable;
-
-#define VM_OBJECT_REUSE_PAGE(object, m, reused)                                \
-       MACRO_BEGIN                                                     \
-               if ((m) != VM_PAGE_NULL &&                              \
-                   (m)->reusable) {                                    \
-                       assert((object)->reusable_page_count <=         \
-                              (object)->resident_page_count);          \
-                       assert((object)->reusable_page_count > 0);      \
-                       (object)->reusable_page_count--;                \
-                       (m)->reusable = FALSE;                          \
-                       (reused)++;                                     \
-                       /*                                              \
-                        * Tell pmap that this page is no longer        \
-                        * "reusable", to update the "reusable" stats   \
-                        * for all the pmaps that have mapped this      \
-                        * page.                                        \
-                        */                                             \
-                       pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
-                                                 0, /* refmod */       \
-                                                 (PMAP_OPTIONS_CLEAR_REUSABLE \
-                                                  | PMAP_OPTIONS_NOFLUSH), \
-                                                 NULL);                \
-               }                                                       \
+       vm_object_offset_t      cur_offset;
+       vm_page_t               m;
+       unsigned int            reused, reusable;
+
+#define VM_OBJECT_REUSE_PAGE(object, m, reused)                         \
+       MACRO_BEGIN                                                     \
+               if ((m) != VM_PAGE_NULL &&                              \
+                   (m)->vmp_reusable) {                                \
+                       assert((object)->reusable_page_count <=         \
+                              (object)->resident_page_count);          \
+                       assert((object)->reusable_page_count > 0);      \
+                       (object)->reusable_page_count--;                \
+                       (m)->vmp_reusable = FALSE;                      \
+                       (reused)++;                                     \
+                       /*                                              \
+                        * Tell pmap that this page is no longer        \
+                        * "reusable", to update the "reusable" stats   \
+                        * for all the pmaps that have mapped this      \
+                        * page.                                        \
+                        */                                             \
+                       pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
+                                                 0, /* refmod */       \
+                                                 (PMAP_OPTIONS_CLEAR_REUSABLE \
+                                                  | PMAP_OPTIONS_NOFLUSH), \
+                                                 NULL);                \
+               }                                                       \
        MACRO_END
 
        reused = 0;
@@ -3080,7 +2559,7 @@ vm_object_reuse_pages(
 
        if (object->all_reusable) {
                panic("object %p all_reusable: can't update pmap stats\n",
-                     object);
+                   object);
                assert(object->reusable_page_count == 0);
                object->all_reusable = FALSE;
                if (end_offset - start_offset == object->vo_size ||
@@ -3089,25 +2568,25 @@ vm_object_reuse_pages(
                        reused = object->resident_page_count;
                } else {
                        vm_page_stats_reusable.partial_reuse_calls++;
-                       vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
-                               if (m->offset < start_offset ||
-                                   m->offset >= end_offset) {
-                                       m->reusable = TRUE;
+                       vm_page_queue_iterate(&object->memq, m, vmp_listq) {
+                               if (m->vmp_offset < start_offset ||
+                                   m->vmp_offset >= end_offset) {
+                                       m->vmp_reusable = TRUE;
                                        object->reusable_page_count++;
                                        assert(object->resident_page_count >= object->reusable_page_count);
                                        continue;
                                } else {
-                                       assert(!m->reusable);
+                                       assert(!m->vmp_reusable);
                                        reused++;
                                }
                        }
                }
        } else if (object->resident_page_count >
-                  ((end_offset - start_offset) >> PAGE_SHIFT)) {
+           ((end_offset - start_offset) >> PAGE_SHIFT)) {
                vm_page_stats_reusable.partial_reuse_calls++;
                for (cur_offset = start_offset;
-                    cur_offset < end_offset;
-                    cur_offset += PAGE_SIZE_64) {
+                   cur_offset < end_offset;
+                   cur_offset += PAGE_SIZE_64) {
                        if (object->reusable_page_count == 0) {
                                break;
                        }
@@ -3116,12 +2595,12 @@ vm_object_reuse_pages(
                }
        } else {
                vm_page_stats_reusable.partial_reuse_calls++;
-               vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
+               vm_page_queue_iterate(&object->memq, m, vmp_listq) {
                        if (object->reusable_page_count == 0) {
                                break;
                        }
-                       if (m->offset < start_offset ||
-                           m->offset >= end_offset) {
+                       if (m->vmp_offset < start_offset ||
+                           m->vmp_offset >= end_offset) {
                                continue;
                        }
                        VM_OBJECT_REUSE_PAGE(object, m, reused);
@@ -3129,7 +2608,7 @@ vm_object_reuse_pages(
        }
 
        /* update global stats */
-       OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count);
+       OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
        vm_page_stats_reusable.reused += reused;
        vm_page_stats_reusable.reusable += reusable;
 }
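
vm_object_reuse_pages() picks between two scan strategies: if the object has more resident pages than there are pages in the requested range, probing each offset with vm_page_lookup() touches fewer pages than walking the whole memq list; otherwise the list walk wins. The threshold test, reduced to a user-space sketch with hypothetical types:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    struct object_stats {
            unsigned int resident_page_count;
    };

    /* true: look up each offset in [start, end); false: walk the list */
    static bool
    prefer_per_offset_lookup(const struct object_stats *o,
        uint64_t start, uint64_t end)
    {
            return o->resident_page_count > ((end - start) >> PAGE_SHIFT);
    }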
@@ -3151,7 +2630,7 @@ vm_object_reuse_pages(
  *             remove access to all pages in shadowed objects.
  *
  *             The object must *not* be locked.  The object must
- *             be temporary/internal.  
+ *             be internal.
  *
  *              If pmap is not NULL, this routine assumes that
  *              the only mappings for the pages are in that
@@ -3160,32 +2639,33 @@ vm_object_reuse_pages(
 
 __private_extern__ void
 vm_object_pmap_protect(
-       vm_object_t                     object,
-       vm_object_offset_t              offset,
-       vm_object_size_t                size,
-       pmap_t                          pmap,
-       vm_map_offset_t                 pmap_start,
-       vm_prot_t                       prot)
+       vm_object_t                     object,
+       vm_object_offset_t              offset,
+       vm_object_size_t                size,
+       pmap_t                          pmap,
+       vm_map_offset_t                 pmap_start,
+       vm_prot_t                       prot)
 {
        vm_object_pmap_protect_options(object, offset, size,
-                                      pmap, pmap_start, prot, 0);
+           pmap, pmap_start, prot, 0);
 }
 
 __private_extern__ void
 vm_object_pmap_protect_options(
-       vm_object_t                     object,
-       vm_object_offset_t              offset,
-       vm_object_size_t                size,
-       pmap_t                          pmap,
-       vm_map_offset_t                 pmap_start,
-       vm_prot_t                       prot,
-       int                             options)
+       vm_object_t                     object,
+       vm_object_offset_t              offset,
+       vm_object_size_t                size,
+       pmap_t                          pmap,
+       vm_map_offset_t                 pmap_start,
+       vm_prot_t                       prot,
+       int                             options)
 {
-       pmap_flush_context      pmap_flush_context_storage;
-       boolean_t               delayed_pmap_flush = FALSE;
+       pmap_flush_context      pmap_flush_context_storage;
+       boolean_t               delayed_pmap_flush = FALSE;
 
-       if (object == VM_OBJECT_NULL)
+       if (object == VM_OBJECT_NULL) {
                return;
+       }
        size = vm_object_round_page(size);
        offset = vm_object_trunc_page(offset);
 
@@ -3195,11 +2675,11 @@ vm_object_pmap_protect_options(
                if (pmap != NULL) {
                        vm_object_unlock(object);
                        pmap_protect_options(pmap,
-                                            pmap_start,
-                                            pmap_start + size,
-                                            prot,
-                                            options & ~PMAP_OPTIONS_NOFLUSH,
-                                            NULL);
+                           pmap_start,
+                           pmap_start + size,
+                           prot,
+                           options & ~PMAP_OPTIONS_NOFLUSH,
+                           NULL);
                } else {
                        vm_object_offset_t phys_start, phys_end, phys_addr;
 
@@ -3213,8 +2693,8 @@ vm_object_pmap_protect_options(
                        delayed_pmap_flush = FALSE;
 
                        for (phys_addr = phys_start;
-                            phys_addr < phys_end;
-                            phys_addr += PAGE_SIZE_64) {
+                           phys_addr < phys_end;
+                           phys_addr += PAGE_SIZE_64) {
                                pmap_page_protect_options(
                                        (ppnum_t) (phys_addr >> PAGE_SHIFT),
                                        prot,
@@ -3222,8 +2702,9 @@ vm_object_pmap_protect_options(
                                        (void *)&pmap_flush_context_storage);
                                delayed_pmap_flush = TRUE;
                        }
-                       if (delayed_pmap_flush == TRUE)
+                       if (delayed_pmap_flush == TRUE) {
                                pmap_flush(&pmap_flush_context_storage);
+                       }
                }
                return;
        }
@@ -3231,122 +2712,123 @@ vm_object_pmap_protect_options(
        assert(object->internal);
 
        while (TRUE) {
-          if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
-               vm_object_unlock(object);
-               pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
-                                    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
-               return;
-           }
-
-          pmap_flush_context_init(&pmap_flush_context_storage);
-          delayed_pmap_flush = FALSE;
-
-           /*
-            * if we are doing large ranges with respect to resident
-            * page count then we should interate over pages otherwise
-            * inverse page look-up will be faster
-            */
-           if (ptoa_64(object->resident_page_count / 4) <  size) {
-               vm_page_t               p;
-               vm_object_offset_t      end;
-
-               end = offset + size;
-
-               vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
-                       if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
-                               vm_map_offset_t start;
-
-                               start = pmap_start + p->offset - offset;
-
-                               if (pmap != PMAP_NULL)
-                                       pmap_protect_options(
-                                               pmap,
-                                               start,
-                                               start + PAGE_SIZE_64,
-                                               prot,
-                                               options | PMAP_OPTIONS_NOFLUSH,
-                                               &pmap_flush_context_storage);
-                               else
-                                       pmap_page_protect_options(
-                                               VM_PAGE_GET_PHYS_PAGE(p),
-                                               prot,
-                                               options | PMAP_OPTIONS_NOFLUSH,
-                                               &pmap_flush_context_storage);
+               if (ptoa_64(object->resident_page_count) > size / 2 && pmap != PMAP_NULL) {
+                       vm_object_unlock(object);
+                       pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
+                           options & ~PMAP_OPTIONS_NOFLUSH, NULL);
+                       return;
+               }
+
+               pmap_flush_context_init(&pmap_flush_context_storage);
+               delayed_pmap_flush = FALSE;
+
+               /*
+                * if the range is large with respect to the resident
+                * page count, we should iterate over the resident pages;
+                * otherwise an inverse page look-up will be faster
+                */
+               if (ptoa_64(object->resident_page_count / 4) < size) {
+                       vm_page_t               p;
+                       vm_object_offset_t      end;
+
+                       end = offset + size;
+
+                       vm_page_queue_iterate(&object->memq, p, vmp_listq) {
+                               if (!p->vmp_fictitious && (offset <= p->vmp_offset) && (p->vmp_offset < end)) {
+                                       vm_map_offset_t start;
+
+                                       start = pmap_start + p->vmp_offset - offset;
+
+                                       if (pmap != PMAP_NULL) {
+                                               pmap_protect_options(
+                                                       pmap,
+                                                       start,
+                                                       start + PAGE_SIZE_64,
+                                                       prot,
+                                                       options | PMAP_OPTIONS_NOFLUSH,
+                                                       &pmap_flush_context_storage);
+                                       } else {
+                                               pmap_page_protect_options(
+                                                       VM_PAGE_GET_PHYS_PAGE(p),
+                                                       prot,
+                                                       options | PMAP_OPTIONS_NOFLUSH,
+                                                       &pmap_flush_context_storage);
+                                       }
                                        delayed_pmap_flush = TRUE;
+                               }
                        }
-               }
+               } else {
+                       vm_page_t               p;
+                       vm_object_offset_t      end;
+                       vm_object_offset_t      target_off;
 
-          } else {
-               vm_page_t               p;
-               vm_object_offset_t      end;
-               vm_object_offset_t      target_off;
+                       end = offset + size;
 
-               end = offset + size;
+                       for (target_off = offset;
+                           target_off < end; target_off += PAGE_SIZE) {
+                               p = vm_page_lookup(object, target_off);
 
-               for (target_off = offset; 
-                    target_off < end; target_off += PAGE_SIZE) {
+                               if (p != VM_PAGE_NULL) {
+                                       vm_object_offset_t start;
 
-                       p = vm_page_lookup(object, target_off);
+                                       start = pmap_start + (p->vmp_offset - offset);
 
-                       if (p != VM_PAGE_NULL) {
-                               vm_object_offset_t start;
-
-                               start = pmap_start + (p->offset - offset);
-
-                               if (pmap != PMAP_NULL)
-                                       pmap_protect_options(
-                                               pmap,
-                                               start,
-                                               start + PAGE_SIZE_64,
-                                               prot,
-                                               options | PMAP_OPTIONS_NOFLUSH,
-                                               &pmap_flush_context_storage);
-                               else
-                                       pmap_page_protect_options(
-                                               VM_PAGE_GET_PHYS_PAGE(p),
-                                               prot,
-                                               options | PMAP_OPTIONS_NOFLUSH,
-                                               &pmap_flush_context_storage);
+                                       if (pmap != PMAP_NULL) {
+                                               pmap_protect_options(
+                                                       pmap,
+                                                       start,
+                                                       start + PAGE_SIZE_64,
+                                                       prot,
+                                                       options | PMAP_OPTIONS_NOFLUSH,
+                                                       &pmap_flush_context_storage);
+                                       } else {
+                                               pmap_page_protect_options(
+                                                       VM_PAGE_GET_PHYS_PAGE(p),
+                                                       prot,
+                                                       options | PMAP_OPTIONS_NOFLUSH,
+                                                       &pmap_flush_context_storage);
+                                       }
                                        delayed_pmap_flush = TRUE;
-                       }
+                               }
+                       }
+               }
+               if (delayed_pmap_flush == TRUE) {
+                       pmap_flush(&pmap_flush_context_storage);
                }
-           }
-           if (delayed_pmap_flush == TRUE)
-                   pmap_flush(&pmap_flush_context_storage);
 
-           if (prot == VM_PROT_NONE) {
-               /*
-                * Must follow shadow chain to remove access
-                * to pages in shadowed objects.
-                */
-               vm_object_t     next_object;
-
-               next_object = object->shadow;
-               if (next_object != VM_OBJECT_NULL) {
-                   offset += object->vo_shadow_offset;
-                   vm_object_lock(next_object);
-                   vm_object_unlock(object);
-                   object = next_object;
-               }
-               else {
-                   /*
-                    * End of chain - we are done.
-                    */
-                   break;
-               }
-           }
-           else {
-               /*
-                * Pages in shadowed objects may never have
-                * write permission - we may stop here.
-                */
-               break;
-           }
+               if (prot == VM_PROT_NONE) {
+                       /*
+                        * Must follow shadow chain to remove access
+                        * to pages in shadowed objects.
+                        */
+                       vm_object_t     next_object;
+
+                       next_object = object->shadow;
+                       if (next_object != VM_OBJECT_NULL) {
+                               offset += object->vo_shadow_offset;
+                               vm_object_lock(next_object);
+                               vm_object_unlock(object);
+                               object = next_object;
+                       } else {
+                               /*
+                                * End of chain - we are done.
+                                */
+                               break;
+                       }
+               } else {
+                       /*
+                        * Pages in shadowed objects may never have
+                        * write permission - we may stop here.
+                        */
+                       break;
+               }
        }
 
        vm_object_unlock(object);
 }
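
Every per-page call in this function passes PMAP_OPTIONS_NOFLUSH and records the would-be TLB invalidation in a pmap_flush_context, so a single pmap_flush() at the end performs one batched shoot-down instead of one per page. A user-space sketch of the pattern, where `flush_ctx` and its helpers are hypothetical stand-ins for the pmap interfaces:

    #include <stdbool.h>
    #include <stdio.h>

    struct flush_ctx { unsigned pending; };

    static void flush_ctx_init(struct flush_ctx *c) { c->pending = 0; }

    static void
    page_protect_noflush(int page, struct flush_ctx *c)
    {
            /* change the mapping, but only record the invalidation */
            printf("protect page %d (flush deferred)\n", page);
            c->pending++;
    }

    static void
    flush(struct flush_ctx *c)
    {
            printf("one shoot-down covering %u pages\n", c->pending);
            c->pending = 0;
    }

    int
    main(void)
    {
            struct flush_ctx ctx;
            bool delayed_flush = false;

            flush_ctx_init(&ctx);
            for (int page = 0; page < 8; page++) {
                    page_protect_noflush(page, &ctx);
                    delayed_flush = true;
            }
            if (delayed_flush) {    /* one flush for the whole batch */
                    flush(&ctx);
            }
            return 0;
    }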
 
+uint32_t vm_page_busy_absent_skipped = 0;
+
 /*
  *     Routine:        vm_object_copy_slowly
  *
@@ -3379,24 +2861,21 @@ vm_object_pmap_protect_options(
  */
 __private_extern__ kern_return_t
 vm_object_copy_slowly(
-       vm_object_t             src_object,
-       vm_object_offset_t      src_offset,
-       vm_object_size_t        size,
-       boolean_t               interruptible,
-       vm_object_t             *_result_object)        /* OUT */
+       vm_object_t             src_object,
+       vm_object_offset_t      src_offset,
+       vm_object_size_t        size,
+       boolean_t               interruptible,
+       vm_object_t             *_result_object)        /* OUT */
 {
-       vm_object_t             new_object;
-       vm_object_offset_t      new_offset;
-
-       struct vm_object_fault_info fault_info;
+       vm_object_t             new_object;
+       vm_object_offset_t      new_offset;
 
-       XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
-           src_object, src_offset, size, 0, 0);
+       struct vm_object_fault_info fault_info = {};
 
        if (size == 0) {
                vm_object_unlock(src_object);
                *_result_object = VM_OBJECT_NULL;
-               return(KERN_INVALID_ARGUMENT);
+               return KERN_INVALID_ARGUMENT;
        }
 
        /*
@@ -3418,104 +2897,130 @@ vm_object_copy_slowly(
        new_object = vm_object_allocate(size);
        new_offset = 0;
 
-       assert(size == trunc_page_64(size));    /* Will the loop terminate? */
+       assert(size == trunc_page_64(size));    /* Will the loop terminate? */
 
        fault_info.interruptible = interruptible;
        fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
-       fault_info.user_tag = 0;
-       fault_info.pmap_options = 0;
        fault_info.lo_offset = src_offset;
        fault_info.hi_offset = src_offset + size;
-       fault_info.no_cache  = FALSE;
        fault_info.stealth = TRUE;
-       fault_info.io_sync = FALSE;
-       fault_info.cs_bypass = FALSE;
-       fault_info.mark_zf_absent = FALSE;
-       fault_info.batch_pmap_op = FALSE;
-
-       for ( ;
-           size != 0 ;
-           src_offset += PAGE_SIZE_64, 
-                       new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
+
+       for (;
+           size != 0;
+           src_offset += PAGE_SIZE_64,
+           new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
            ) {
-               vm_page_t       new_page;
+               vm_page_t       new_page;
                vm_fault_return_t result;
 
                vm_object_lock(new_object);
 
                while ((new_page = vm_page_alloc(new_object, new_offset))
-                               == VM_PAGE_NULL) {
-
+                   == VM_PAGE_NULL) {
                        vm_object_unlock(new_object);
 
                        if (!vm_page_wait(interruptible)) {
                                vm_object_deallocate(new_object);
                                vm_object_deallocate(src_object);
                                *_result_object = VM_OBJECT_NULL;
-                               return(MACH_SEND_INTERRUPTED);
+                               return MACH_SEND_INTERRUPTED;
                        }
                        vm_object_lock(new_object);
                }
                vm_object_unlock(new_object);
 
                do {
-                       vm_prot_t       prot = VM_PROT_READ;
-                       vm_page_t       _result_page;
-                       vm_page_t       top_page;
-                       vm_page_t       result_page;
-                       kern_return_t   error_code;
-                       vm_object_t     result_page_object;
+                       vm_prot_t       prot = VM_PROT_READ;
+                       vm_page_t       _result_page;
+                       vm_page_t       top_page;
+                       vm_page_t       result_page;
+                       kern_return_t   error_code;
+                       vm_object_t     result_page_object;
 
 
                        vm_object_lock(src_object);
 
                        if (src_object->internal &&
                            src_object->shadow == VM_OBJECT_NULL &&
-                           (vm_page_lookup(src_object,
-                                           src_offset) == VM_PAGE_NULL) &&
                            (src_object->pager == NULL ||
-                            (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
-                                                           src_offset) ==
-                             VM_EXTERNAL_STATE_ABSENT))) {
-                               /*
-                                * This page is neither resident nor compressed
-                                * and there's no shadow object below 
-                                * "src_object", so this page is really missing.
-                                * There's no need to zero-fill it just to copy
-                                * it:  let's leave it missing in "new_object"
-                                * and get zero-filled on demand.
-                                */
-                               vm_object_unlock(src_object);
-                               /* free the unused "new_page"... */
-                               vm_object_lock(new_object);
-                               VM_PAGE_FREE(new_page);
-                               new_page = VM_PAGE_NULL;
-                               vm_object_unlock(new_object);
-                               /* ...and go to next page in "src_object" */
-                               result = VM_FAULT_SUCCESS;
-                               break;
+                           (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
+                           src_offset) ==
+                           VM_EXTERNAL_STATE_ABSENT))) {
+                               boolean_t can_skip_page;
+
+                               _result_page = vm_page_lookup(src_object,
+                                   src_offset);
+                               if (_result_page == VM_PAGE_NULL) {
+                                       /*
+                                        * This page is neither resident nor
+                                        * compressed and there's no shadow
+                                        * object below "src_object", so this
+                                        * page is really missing.
+                                        * There's no need to zero-fill it just
+                                        * to copy it:  let's leave it missing
+                                        * in "new_object" and get zero-filled
+                                        * on demand.
+                                        */
+                                       can_skip_page = TRUE;
+                               } else if (workaround_41447923 &&
+                                   src_object->pager == NULL &&
+                                   _result_page != VM_PAGE_NULL &&
+                                   _result_page->vmp_busy &&
+                                   _result_page->vmp_absent &&
+                                   src_object->purgable == VM_PURGABLE_DENY &&
+                                   !src_object->blocked_access) {
+                                       /*
+                                        * This page is "busy" and "absent"
+                                        * but not because we're waiting for
+                                        * it to be decompressed.  It must
+                                        * be because it's a "no zero fill"
+                                        * page that is currently not
+                                        * accessible until it gets overwritten
+                                        * by a device driver.
+                                        * Since its initial state would have
+                                        * been "zero-filled", let's leave the
+                                        * copy page missing and get zero-filled
+                                        * on demand.
+                                        */
+                                       assert(src_object->internal);
+                                       assert(src_object->shadow == NULL);
+                                       assert(src_object->pager == NULL);
+                                       can_skip_page = TRUE;
+                                       vm_page_busy_absent_skipped++;
+                               } else {
+                                       can_skip_page = FALSE;
+                               }
+                               if (can_skip_page) {
+                                       vm_object_unlock(src_object);
+                                       /* free the unused "new_page"... */
+                                       vm_object_lock(new_object);
+                                       VM_PAGE_FREE(new_page);
+                                       new_page = VM_PAGE_NULL;
+                                       vm_object_unlock(new_object);
+                                       /* ...and go to next page in "src_object" */
+                                       result = VM_FAULT_SUCCESS;
+                                       break;
+                               }
                        }
 
                        vm_object_paging_begin(src_object);
 
-                       if (size > (vm_size_t) -1) {
-                               /* 32-bit overflow */
-                               fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
-                       } else {
-                               fault_info.cluster_size = (vm_size_t) size;
-                               assert(fault_info.cluster_size == size);
+                       /* cap size at maximum UPL size */
+                       upl_size_t cluster_size;
+                       if (os_convert_overflow(size, &cluster_size)) {
+                               cluster_size = 0 - (upl_size_t)PAGE_SIZE;
                        }
+                       fault_info.cluster_size = cluster_size;
 
-                       XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
                        _result_page = VM_PAGE_NULL;
                        result = vm_fault_page(src_object, src_offset,
-                               VM_PROT_READ, FALSE,
-                               FALSE, /* page not looked up */
-                               &prot, &_result_page, &top_page,
-                               (int *)0,
-                               &error_code, FALSE, FALSE, &fault_info);
+                           VM_PROT_READ, FALSE,
+                           FALSE,     /* page not looked up */
+                           &prot, &_result_page, &top_page,
+                           (int *)0,
+                           &error_code, FALSE, FALSE, &fault_info);
 
-                       switch(result) {
+                       switch (result) {
                        case VM_FAULT_SUCCESS:
                                result_page = _result_page;
                                result_page_object = VM_PAGE_OBJECT(result_page);
@@ -3545,8 +3050,8 @@ vm_object_copy_slowly(
                                PAGE_WAKEUP_DONE(result_page);
 
                                vm_page_lockspin_queues();
-                               if ((result_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
-                                   (result_page->vm_page_q_state == VM_PAGE_NOT_ON_Q)) {
+                               if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
+                                   (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
                                        vm_page_activate(result_page);
                                }
                                vm_page_activate(new_page);
@@ -3558,33 +3063,34 @@ vm_object_copy_slowly(
                                 */
 
                                vm_fault_cleanup(result_page_object,
-                                                top_page);
+                                   top_page);
 
                                break;
-                               
+
                        case VM_FAULT_RETRY:
                                break;
 
                        case VM_FAULT_MEMORY_SHORTAGE:
-                               if (vm_page_wait(interruptible))
+                               if (vm_page_wait(interruptible)) {
                                        break;
-                               /* fall thru */
+                               }
+                       /* fall thru */
 
                        case VM_FAULT_INTERRUPTED:
                                vm_object_lock(new_object);
                                VM_PAGE_FREE(new_page);
                                vm_object_unlock(new_object);
-                                       
+
                                vm_object_deallocate(new_object);
                                vm_object_deallocate(src_object);
                                *_result_object = VM_OBJECT_NULL;
-                               return(MACH_SEND_INTERRUPTED);
+                               return MACH_SEND_INTERRUPTED;
 
                        case VM_FAULT_SUCCESS_NO_VM_PAGE:
                                /* success but no VM page: fail */
                                vm_object_paging_end(src_object);
                                vm_object_unlock(src_object);
-                               /*FALLTHROUGH*/
+                       /*FALLTHROUGH*/
                        case VM_FAULT_MEMORY_ERROR:
                                /*
                                 * A policy choice:
@@ -3601,12 +3107,12 @@ vm_object_copy_slowly(
                                vm_object_deallocate(new_object);
                                vm_object_deallocate(src_object);
                                *_result_object = VM_OBJECT_NULL;
-                               return(error_code ? error_code:
-                                      KERN_MEMORY_ERROR);
+                               return error_code ? error_code:
+                                      KERN_MEMORY_ERROR;
 
                        default:
                                panic("vm_object_copy_slowly: unexpected error"
-                                     " 0x%x from vm_fault_page()\n", result);
+                                   " 0x%x from vm_fault_page()\n", result);
                        }
                } while (result != VM_FAULT_SUCCESS);
        }
@@ -3616,7 +3122,7 @@ vm_object_copy_slowly(
         */
        vm_object_deallocate(src_object);
        *_result_object = new_object;
-       return(KERN_SUCCESS);
+       return KERN_SUCCESS;
 }
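
The new cluster-size code above replaces an open-coded 32-bit overflow check with os_convert_overflow(), saturating to the largest page-aligned value when the 64-bit size does not fit. A sketch of the same checked narrowing using Clang/GCC's __builtin_add_overflow (adding 0 makes it act as a checked conversion); the upl_size_t typedef here is a stand-in:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    typedef uint32_t upl_size_t;

    static upl_size_t
    cap_cluster_size(uint64_t size)
    {
            upl_size_t cluster_size;

            if (__builtin_add_overflow(size, 0, &cluster_size)) {
                    /* didn't fit: largest page-aligned 32-bit value */
                    cluster_size = 0 - (upl_size_t)PAGE_SIZE;
            }
            return cluster_size;
    }

    int
    main(void)
    {
            printf("%u\n", cap_cluster_size(1u << 20));    /* fits */
            printf("%u\n", cap_cluster_size(1ULL << 40));  /* capped */
            return 0;
    }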
 
 /*
@@ -3639,21 +3145,19 @@ vm_object_copy_slowly(
 /*ARGSUSED*/
 __private_extern__ boolean_t
 vm_object_copy_quickly(
-       vm_object_t             *_object,               /* INOUT */
-       __unused vm_object_offset_t     offset, /* IN */
-       __unused vm_object_size_t       size,   /* IN */
-       boolean_t               *_src_needs_copy,       /* OUT */
-       boolean_t               *_dst_needs_copy)       /* OUT */
+       vm_object_t             *_object,               /* INOUT */
+       __unused vm_object_offset_t     offset, /* IN */
+       __unused vm_object_size_t       size,   /* IN */
+       boolean_t               *_src_needs_copy,       /* OUT */
+       boolean_t               *_dst_needs_copy)       /* OUT */
 {
-       vm_object_t     object = *_object;
+       vm_object_t     object = *_object;
        memory_object_copy_strategy_t copy_strategy;
 
-       XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
-           *_object, offset, size, 0, 0);
        if (object == VM_OBJECT_NULL) {
                *_src_needs_copy = FALSE;
                *_dst_needs_copy = FALSE;
-               return(TRUE);
+               return TRUE;
        }
 
        vm_object_lock(object);
@@ -3686,13 +3190,13 @@ vm_object_copy_quickly(
 
        case MEMORY_OBJECT_COPY_DELAY:
                vm_object_unlock(object);
-               return(FALSE);
+               return FALSE;
 
        default:
                vm_object_unlock(object);
-               return(FALSE);
+               return FALSE;
        }
-       return(TRUE);
+       return TRUE;
 }
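
vm_object_copy_quickly() implements the symmetric copy strategy: source and destination keep sharing one object, and both mappings are flagged so that whichever side writes first must shadow the object. The bookkeeping, reduced to a hypothetical sketch:

    #include <stdbool.h>

    struct object  { int ref_count; };
    struct mapping { struct object *object; bool needs_copy; };

    static void
    copy_quickly(struct mapping *src, struct mapping *dst)
    {
            dst->object = src->object;      /* share, don't copy pages */
            dst->object->ref_count++;       /* new mapping references it */
            src->needs_copy = true;         /* symmetric COW: both sides */
            dst->needs_copy = true;         /* must shadow before writing */
    }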
 
 static int copy_call_count = 0;
@@ -3719,15 +3223,15 @@ static int copy_call_restart_count = 0;
  */
 static kern_return_t
 vm_object_copy_call(
-       vm_object_t             src_object,
-       vm_object_offset_t      src_offset,
-       vm_object_size_t        size,
-       vm_object_t             *_result_object)        /* OUT */
+       vm_object_t             src_object,
+       vm_object_offset_t      src_offset,
+       vm_object_size_t        size,
+       vm_object_t             *_result_object)        /* OUT */
 {
-       kern_return_t   kr;
-       vm_object_t     copy;
-       boolean_t       check_ready = FALSE;
-       uint32_t        try_failed_count = 0;
+       kern_return_t   kr;
+       vm_object_t     copy;
+       boolean_t       check_ready = FALSE;
+       uint32_t        try_failed_count = 0;
 
        /*
         *      If a copy is already in progress, wait and retry.
@@ -3745,7 +3249,7 @@ vm_object_copy_call(
        copy_call_count++;
        while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
                vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
-                              THREAD_UNINT);
+                   THREAD_UNINT);
                copy_call_restart_count++;
        }
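
The wait loop above is the standard event-wait pattern: re-check the predicate after every wakeup, because another thread may have re-armed the copy-call event in the meantime. The same shape with a pthread condition variable, where `copy_call_in_progress` is a hypothetical stand-in flag:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t obj_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  obj_event = PTHREAD_COND_INITIALIZER;
    static bool copy_call_in_progress;

    static void
    wait_for_copy_call(void)
    {
            pthread_mutex_lock(&obj_lock);
            while (copy_call_in_progress) {         /* predicate loop */
                    pthread_cond_wait(&obj_event, &obj_lock);
            }
            copy_call_in_progress = true;           /* we own the event */
            pthread_mutex_unlock(&obj_lock);
    }

    static void
    finish_copy_call(void)
    {
            pthread_mutex_lock(&obj_lock);
            copy_call_in_progress = false;
            pthread_cond_broadcast(&obj_event);     /* wake all waiters */
            pthread_mutex_unlock(&obj_lock);
    }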
 
@@ -3768,7 +3272,7 @@ vm_object_copy_call(
         *      via memory_object_create_copy.
         */
 
-       kr = KERN_FAILURE;      /* XXX need to change memory_object.defs */
+       kr = KERN_FAILURE;      /* XXX need to change memory_object.defs */
        if (kr != KERN_SUCCESS) {
                return kr;
        }
@@ -3779,7 +3283,7 @@ vm_object_copy_call(
        vm_object_lock(src_object);
        while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
                vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
-                              THREAD_UNINT);
+                   THREAD_UNINT);
                copy_call_sleep_count++;
        }
 Retry:
@@ -3789,16 +3293,18 @@ Retry:
                vm_object_unlock(src_object);
 
                try_failed_count++;
-               mutex_pause(try_failed_count);  /* wait a bit */
+               mutex_pause(try_failed_count);  /* wait a bit */
 
                vm_object_lock(src_object);
                goto Retry;
        }
-       if (copy->vo_size < src_offset+size)
-               copy->vo_size = src_offset+size;
+       if (copy->vo_size < src_offset + size) {
+               copy->vo_size = src_offset + size;
+       }
 
-       if (!copy->pager_ready)
+       if (!copy->pager_ready) {
                check_ready = TRUE;
+       }
 
        /*
         *      Return the copy.
@@ -3840,24 +3346,24 @@ static int copy_delayed_protect_iterate = 0;
  */
 __private_extern__ vm_object_t
 vm_object_copy_delayed(
-       vm_object_t             src_object,
-       vm_object_offset_t      src_offset,
-       vm_object_size_t        size,
-       boolean_t               src_object_shared)
+       vm_object_t             src_object,
+       vm_object_offset_t      src_offset,
+       vm_object_size_t        size,
+       boolean_t               src_object_shared)
 {
-       vm_object_t             new_copy = VM_OBJECT_NULL;
-       vm_object_t             old_copy;
-       vm_page_t               p;
-       vm_object_size_t        copy_size = src_offset + size;
-       pmap_flush_context      pmap_flush_context_storage;
-       boolean_t               delayed_pmap_flush = FALSE;
+       vm_object_t             new_copy = VM_OBJECT_NULL;
+       vm_object_t             old_copy;
+       vm_page_t               p;
+       vm_object_size_t        copy_size = src_offset + size;
+       pmap_flush_context      pmap_flush_context_storage;
+       boolean_t               delayed_pmap_flush = FALSE;
 
 
        int collisions = 0;
        /*
         *      The user-level memory manager wants to see all of the changes
         *      to this object, but it has promised not to make any changes on
-        *      its own.
+        *      its own.
         *
         *      Perform an asymmetric copy-on-write, as follows:
         *              Create a new object, called a "copy object" to hold
@@ -3894,16 +3400,16 @@ vm_object_copy_delayed(
         */
 
        copy_size = vm_object_round_page(copy_size);
- Retry:
+Retry:
+
        /*
         * Wait for paging in progress.
         */
        if (!src_object->true_share &&
            (src_object->paging_in_progress != 0 ||
-            src_object->activity_in_progress != 0)) {
-               if (src_object_shared == TRUE) {
-                       vm_object_unlock(src_object);
+           src_object->activity_in_progress != 0)) {
+               if (src_object_shared == TRUE) {
+                       vm_object_unlock(src_object);
                        vm_object_lock(src_object);
                        src_object_shared = FALSE;
                        goto Retry;
@@ -3917,33 +3423,37 @@ vm_object_copy_delayed(
 
        old_copy = src_object->copy;
        if (old_copy != VM_OBJECT_NULL) {
-               int lock_granted;
+               int lock_granted;
 
                /*
                 *      Try to get the locks (out of order)
                 */
-               if (src_object_shared == TRUE)
-                       lock_granted = vm_object_lock_try_shared(old_copy);
-               else
-                       lock_granted = vm_object_lock_try(old_copy);
+               if (src_object_shared == TRUE) {
+                       lock_granted = vm_object_lock_try_shared(old_copy);
+               } else {
+                       lock_granted = vm_object_lock_try(old_copy);
+               }
 
                if (!lock_granted) {
                        vm_object_unlock(src_object);
 
-                       if (collisions++ == 0)
+                       if (collisions++ == 0) {
                                copy_delayed_lock_contention++;
+                       }
                        mutex_pause(collisions);
 
                        /* Heisenberg Rules */
                        copy_delayed_lock_collisions++;
 
-                       if (collisions > copy_delayed_max_collisions)
+                       if (collisions > copy_delayed_max_collisions) {
                                copy_delayed_max_collisions = collisions;
+                       }
 
-                       if (src_object_shared == TRUE)
-                               vm_object_lock_shared(src_object);
-                       else
-                               vm_object_lock(src_object);
+                       if (src_object_shared == TRUE) {
+                               vm_object_lock_shared(src_object);
+                       } else {
+                               vm_object_lock(src_object);
+                       }
 
                        goto Retry;
                }
@@ -3965,10 +3475,10 @@ vm_object_copy_delayed(
                         */
 
                        if (old_copy->vo_size < copy_size) {
-                               if (src_object_shared == TRUE) {
-                                       vm_object_unlock(old_copy);
+                               if (src_object_shared == TRUE) {
+                                       vm_object_unlock(old_copy);
                                        vm_object_unlock(src_object);
-                               
+
                                        vm_object_lock(src_object);
                                        src_object_shared = FALSE;
                                        goto Retry;
@@ -3985,10 +3495,10 @@ vm_object_copy_delayed(
                                pmap_flush_context_init(&pmap_flush_context_storage);
                                delayed_pmap_flush = FALSE;
 
-                               vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
-                                       if (!p->fictitious && 
-                                           p->offset >= old_copy->vo_size && 
-                                           p->offset < copy_size) {
+                               vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
+                                       if (!p->vmp_fictitious &&
+                                           p->vmp_offset >= old_copy->vo_size &&
+                                           p->vmp_offset < copy_size) {
                                                if (VM_PAGE_WIRED(p)) {
                                                        vm_object_unlock(old_copy);
                                                        vm_object_unlock(src_object);
@@ -3997,26 +3507,29 @@ vm_object_copy_delayed(
                                                                vm_object_unlock(new_copy);
                                                                vm_object_deallocate(new_copy);
                                                        }
-                                                       if (delayed_pmap_flush == TRUE)
+                                                       if (delayed_pmap_flush == TRUE) {
                                                                pmap_flush(&pmap_flush_context_storage);
+                                                       }
 
                                                        return VM_OBJECT_NULL;
                                                } else {
                                                        pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
-                                                                                 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+                                                           PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
                                                        delayed_pmap_flush = TRUE;
                                                }
                                        }
                                }
-                               if (delayed_pmap_flush == TRUE)
+                               if (delayed_pmap_flush == TRUE) {
                                        pmap_flush(&pmap_flush_context_storage);
+                               }
 
                                old_copy->vo_size = copy_size;
                        }
-                       if (src_object_shared == TRUE)
-                               vm_object_reference_shared(old_copy);
-                       else
-                               vm_object_reference_locked(old_copy);
+                       if (src_object_shared == TRUE) {
+                               vm_object_reference_shared(old_copy);
+                       } else {
+                               vm_object_reference_locked(old_copy);
+                       }
                        vm_object_unlock(old_copy);
                        vm_object_unlock(src_object);
 
@@ -4024,18 +3537,19 @@ vm_object_copy_delayed(
                                vm_object_unlock(new_copy);
                                vm_object_deallocate(new_copy);
                        }
-                       return(old_copy);
+                       return old_copy;
                }
-               
-               
+
+
 
                /*
-                * Adjust the size argument so that the newly-created 
+                * Adjust the size argument so that the newly-created
                 * copy object will be large enough to back either the
                 * old copy object or the new mapping.
                 */
-               if (old_copy->vo_size > copy_size)
+               if (old_copy->vo_size > copy_size) {
                        copy_size = old_copy->vo_size;
+               }
 
                if (new_copy == VM_OBJECT_NULL) {
                        vm_object_unlock(old_copy);
@@ -4047,7 +3561,7 @@ vm_object_copy_delayed(
                        src_object_shared = FALSE;
                        goto Retry;
                }
-               new_copy->vo_size = copy_size;  
+               new_copy->vo_size = copy_size;
 
                /*
                 *      The copy-object is always made large enough to
@@ -4058,7 +3572,6 @@ vm_object_copy_delayed(
 
                assert((old_copy->shadow == src_object) &&
                    (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
-
        } else if (new_copy == VM_OBJECT_NULL) {
                vm_object_unlock(src_object);
                new_copy = vm_object_allocate(copy_size);
@@ -4085,28 +3598,31 @@ vm_object_copy_delayed(
        pmap_flush_context_init(&pmap_flush_context_storage);
        delayed_pmap_flush = FALSE;
 
-       vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
-               if (!p->fictitious && p->offset < copy_size) {
+       vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
+               if (!p->vmp_fictitious && p->vmp_offset < copy_size) {
                        if (VM_PAGE_WIRED(p)) {
-                               if (old_copy)
+                               if (old_copy) {
                                        vm_object_unlock(old_copy);
+                               }
                                vm_object_unlock(src_object);
                                vm_object_unlock(new_copy);
                                vm_object_deallocate(new_copy);
 
-                               if (delayed_pmap_flush == TRUE)
+                               if (delayed_pmap_flush == TRUE) {
                                        pmap_flush(&pmap_flush_context_storage);
+                               }
 
                                return VM_OBJECT_NULL;
                        } else {
                                pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
-                                                         PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+                                   PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
                                delayed_pmap_flush = TRUE;
                        }
                }
        }
-       if (delayed_pmap_flush == TRUE)
+       if (delayed_pmap_flush == TRUE) {
                pmap_flush(&pmap_flush_context_storage);
+       }
 
        if (old_copy != VM_OBJECT_NULL) {
                /*
@@ -4123,7 +3639,7 @@ vm_object_copy_delayed(
                old_copy->shadow = new_copy;
                vm_object_lock_assert_exclusive(new_copy);
                assert(new_copy->ref_count > 0);
-               new_copy->ref_count++;          /* for old_copy->shadow ref. */
+               new_copy->ref_count++;          /* for old_copy->shadow ref. */
 
 #if TASK_SWAPPER
                if (old_copy->res_count) {
@@ -4132,7 +3648,7 @@ vm_object_copy_delayed(
                }
 #endif
 
-               vm_object_unlock(old_copy);     /* done with old_copy */
+               vm_object_unlock(old_copy);     /* done with old_copy */
        }
 
        /*
@@ -4141,7 +3657,7 @@ vm_object_copy_delayed(
        vm_object_lock_assert_exclusive(new_copy);
        new_copy->shadow = src_object;
        new_copy->vo_shadow_offset = 0;
-       new_copy->shadowed = TRUE;      /* caller must set needs_copy */
+       new_copy->shadowed = TRUE;      /* caller must set needs_copy */
 
        vm_object_lock_assert_exclusive(src_object);
        vm_object_reference_locked(src_object);
@@ -4149,10 +3665,6 @@ vm_object_copy_delayed(
        vm_object_unlock(src_object);
        vm_object_unlock(new_copy);
 
-       XPR(XPR_VM_OBJECT,
-               "vm_object_copy_delayed: used copy object %X for source %X\n",
-               new_copy, src_object, 0, 0, 0);
-
        return new_copy;
 }
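
The old_copy path above acquires locks out of order (src_object is already held when old_copy is attempted), so it uses a try-lock: on failure it drops src_object, pauses in proportion to the collision count, and restarts from Retry rather than deadlocking. A user-space sketch of that backoff pattern, with pthread trylock standing in for vm_object_lock_try() and usleep() for mutex_pause():

    #include <pthread.h>
    #include <unistd.h>

    static void
    pause_backoff(int collisions)
    {
            usleep(10 * collisions);        /* stand-in for mutex_pause() */
    }

    /* Acquire src then copy, without a global lock ordering. */
    static void
    lock_both(pthread_mutex_t *src, pthread_mutex_t *copy)
    {
            int collisions = 0;

            for (;;) {
                    pthread_mutex_lock(src);
                    if (pthread_mutex_trylock(copy) == 0) {
                            return;         /* both held */
                    }
                    pthread_mutex_unlock(src);      /* back off, retry */
                    pause_backoff(++collisions);
            }
    }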
 
@@ -4166,16 +3678,16 @@ vm_object_copy_delayed(
  */
 __private_extern__ kern_return_t
 vm_object_copy_strategically(
-       vm_object_t             src_object,
-       vm_object_offset_t      src_offset,
-       vm_object_size_t        size,
-       vm_object_t             *dst_object,    /* OUT */
-       vm_object_offset_t      *dst_offset,    /* OUT */
-       boolean_t               *dst_needs_copy) /* OUT */
+       vm_object_t             src_object,
+       vm_object_offset_t      src_offset,
+       vm_object_size_t        size,
+       vm_object_t             *dst_object,    /* OUT */
+       vm_object_offset_t      *dst_offset,    /* OUT */
+       boolean_t               *dst_needs_copy) /* OUT */
 {
-       boolean_t       result;
-       boolean_t       interruptible = THREAD_ABORTSAFE; /* XXX */
-       boolean_t       object_lock_shared = FALSE;
+       boolean_t       result;
+       boolean_t       interruptible = THREAD_ABORTSAFE; /* XXX */
+       boolean_t       object_lock_shared = FALSE;
        memory_object_copy_strategy_t copy_strategy;
 
        assert(src_object != VM_OBJECT_NULL);
@@ -4183,10 +3695,11 @@ vm_object_copy_strategically(
        copy_strategy = src_object->copy_strategy;
 
        if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
-               vm_object_lock_shared(src_object);
+               vm_object_lock_shared(src_object);
                object_lock_shared = TRUE;
-       } else
-               vm_object_lock(src_object);
+       } else {
+               vm_object_lock(src_object);
+       }
 
        /*
         *      The copy strategy is only valid if the memory manager
@@ -4197,20 +3710,20 @@ vm_object_copy_strategically(
                wait_result_t wait_result;
 
                if (object_lock_shared == TRUE) {
-                       vm_object_unlock(src_object);
+                       vm_object_unlock(src_object);
                        vm_object_lock(src_object);
                        object_lock_shared = FALSE;
                        continue;
                }
-               wait_result = vm_object_sleep(  src_object,
-                                               VM_OBJECT_EVENT_PAGER_READY,
-                                               interruptible);
+               wait_result = vm_object_sleep(  src_object,
+                   VM_OBJECT_EVENT_PAGER_READY,
+                   interruptible);
                if (wait_result != THREAD_AWAKENED) {
                        vm_object_unlock(src_object);
                        *dst_object = VM_OBJECT_NULL;
                        *dst_offset = 0;
                        *dst_needs_copy = FALSE;
-                       return(MACH_SEND_INTERRUPTED);
+                       return MACH_SEND_INTERRUPTED;
                }
        }
 
@@ -4219,9 +3732,9 @@ vm_object_copy_strategically(
         */
 
        switch (copy_strategy) {
-           case MEMORY_OBJECT_COPY_DELAY:
+       case MEMORY_OBJECT_COPY_DELAY:
                *dst_object = vm_object_copy_delayed(src_object,
-                                                    src_offset, size, object_lock_shared);
+                   src_offset, size, object_lock_shared);
                if (*dst_object != VM_OBJECT_NULL) {
                        *dst_offset = src_offset;
                        *dst_needs_copy = TRUE;
@@ -4229,37 +3742,36 @@ vm_object_copy_strategically(
                        break;
                }
                vm_object_lock(src_object);
-               /* fall thru when delayed copy not allowed */
+       /* fall thru when delayed copy not allowed */
 
-           case MEMORY_OBJECT_COPY_NONE:
+       case MEMORY_OBJECT_COPY_NONE:
                result = vm_object_copy_slowly(src_object, src_offset, size,
-                                              interruptible, dst_object);
+                   interruptible, dst_object);
                if (result == KERN_SUCCESS) {
                        *dst_offset = 0;
                        *dst_needs_copy = FALSE;
                }
                break;
 
-           case MEMORY_OBJECT_COPY_CALL:
+       case MEMORY_OBJECT_COPY_CALL:
                result = vm_object_copy_call(src_object, src_offset, size,
-                               dst_object);
+                   dst_object);
                if (result == KERN_SUCCESS) {
                        *dst_offset = src_offset;
                        *dst_needs_copy = TRUE;
                }
                break;
 
-           case MEMORY_OBJECT_COPY_SYMMETRIC:
-               XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
+       case MEMORY_OBJECT_COPY_SYMMETRIC:
                vm_object_unlock(src_object);
                result = KERN_MEMORY_RESTART_COPY;
                break;
 
-           default:
+       default:
                panic("copy_strategically: bad strategy");
                result = KERN_INVALID_ARGUMENT;
        }
-       return(result);
+       return result;
 }
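
[Annotation] The switch above is the whole contract of vm_object_copy_strategically() from a caller's point of view: each strategy fixes the out parameters differently. A compilable sketch of just that dispatch; the enum names mirror MEMORY_OBJECT_COPY_*, everything else (types, error handling, the DELAY-falls-back-to-NONE retry) is an assumption or omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum copy_strategy { COPY_DELAY, COPY_NONE, COPY_CALL, COPY_SYMMETRIC };

/* Returns false when the caller must restart the copy (SYMMETRIC). */
static bool
copy_strategically(enum copy_strategy s, uint64_t src_offset,
    uint64_t *dst_offset, bool *dst_needs_copy)
{
	switch (s) {
	case COPY_DELAY:
	case COPY_CALL:
		/* The copy object aliases the source at the same
		 * offset; the caller must still mark it needs-copy. */
		*dst_offset = src_offset;
		*dst_needs_copy = true;
		return true;
	case COPY_NONE:
		/* Eager copy: a fresh object, starting at offset 0. */
		*dst_offset = 0;
		*dst_needs_copy = false;
		return true;
	case COPY_SYMMETRIC:
	default:
		/* No usable strategy here: restart the copy. */
		return false;
	}
}

int
main(void)
{
	uint64_t off;
	bool needs_copy;

	if (copy_strategically(COPY_DELAY, 0x4000, &off, &needs_copy)) {
		printf("offset 0x%llx, needs_copy %d\n",
		    (unsigned long long)off, (int)needs_copy);
	}
	return 0;
}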
 
 /*
@@ -4276,17 +3788,18 @@ boolean_t vm_object_shadow_check = TRUE;
 
 __private_extern__ boolean_t
 vm_object_shadow(
-       vm_object_t             *object,        /* IN/OUT */
-       vm_object_offset_t      *offset,        /* IN/OUT */
-       vm_object_size_t        length)
+       vm_object_t             *object,        /* IN/OUT */
+       vm_object_offset_t      *offset,        /* IN/OUT */
+       vm_object_size_t        length)
 {
-       vm_object_t     source;
-       vm_object_t     result;
+       vm_object_t     source;
+       vm_object_t     result;
 
        source = *object;
        assert(source != VM_OBJECT_NULL);
-       if (source == VM_OBJECT_NULL)
+       if (source == VM_OBJECT_NULL) {
                return FALSE;
+       }
 
 #if 0
        /*
@@ -4320,17 +3833,22 @@ vm_object_shadow(
 
        if (vm_object_shadow_check &&
            source->vo_size == length &&
-           source->ref_count == 1 &&
-           (source->shadow == VM_OBJECT_NULL ||
-            source->shadow->copy == VM_OBJECT_NULL) )
-       {
-               /* lock the object and check again */
+           source->ref_count == 1) {
+               /*
+                * Lock the object and check again.
+                * We also check to see if there's
+                * a shadow or copy object involved.
+                * We can't do that earlier because
+                * without the object locked, there
+                * could be a collapse and the chain
+                * gets modified leaving us with an
+                * invalid pointer.
+                */
                vm_object_lock(source);
                if (source->vo_size == length &&
                    source->ref_count == 1 &&
                    (source->shadow == VM_OBJECT_NULL ||
-                    source->shadow->copy == VM_OBJECT_NULL))
-               {
+                   source->shadow->copy == VM_OBJECT_NULL)) {
                        source->shadowed = FALSE;
                        vm_object_unlock(source);
                        return FALSE;
@@ -4343,8 +3861,9 @@ vm_object_shadow(
         *      Allocate a new object with the given length
         */
 
-       if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
+       if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
                panic("vm_object_shadow: no object for shadowing");
+       }
 
        /*
         *      The new object shadows the source object, adding
@@ -4354,7 +3873,7 @@ vm_object_shadow(
         *      count.
         */
        result->shadow = source;
-       
+
        /*
         *      Store the offset into the source object,
         *      and fix up the offset into the new object.
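
[Annotation] The vm_object_shadow_check fast path in the hunk above can be read in isolation: if the source already covers the requested length, has a single reference, and its shadow carries no copy object, no new shadow is needed. A toy model with illustrative fields only and none of the kernel's locking (the real code re-checks under the object lock, as its comment explains):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct obj {
	size_t      size;
	int         ref_count;
	struct obj *shadow;
	struct obj *copy;
	bool        shadowed;
};

static bool
needs_shadow(struct obj *source, size_t length)
{
	if (source->size == length &&
	    source->ref_count == 1 &&
	    (source->shadow == NULL || source->shadow->copy == NULL)) {
		/* Sole user of a fully covering object: clear the
		 * "shadowed" hint and keep using it directly. */
		source->shadowed = false;
		return false;
	}
	return true;    /* a shadow object must be allocated */
}

int
main(void)
{
	struct obj src = { .size = 4096, .ref_count = 1 };

	printf("%d\n", needs_shadow(&src, 4096)); /* 0: reuse source */
	src.ref_count = 2;
	printf("%d\n", needs_shadow(&src, 4096)); /* 1: shadow needed */
	return 0;
}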
@@ -4430,10 +3949,6 @@ vm_object_shadow(
  *     [Furthermore, each routine must cope with the simultaneous
  *     or previous operations of the others.]
  *
- *     In addition to the lock on the object, the vm_object_hash_lock
- *     governs the associations.  References gained through the
- *     association require use of the hash lock.
- *
  *     Because the pager field may be cleared spontaneously, it
  *     cannot be used to determine whether a memory object has
  *     ever been associated with a particular vm_object.  [This
@@ -4452,252 +3967,88 @@ vm_object_shadow(
 
 
 /*
- *     Routine:        vm_object_enter
+ *     Routine:        vm_object_memory_object_associate
  *     Purpose:
- *             Find a VM object corresponding to the given
- *             pager; if no such object exists, create one,
- *             and initialize the pager.
+ *             Associate a VM object to the given pager.
+ *             If a VM object is not provided, create one.
+ *             Initialize the pager.
  */
 vm_object_t
-vm_object_enter(
-       memory_object_t         pager,
-       vm_object_size_t        size,
-       boolean_t               internal,
-       boolean_t               init,
-       boolean_t               named)
+vm_object_memory_object_associate(
+       memory_object_t         pager,
+       vm_object_t             object,
+       vm_object_size_t        size,
+       boolean_t               named)
 {
-       vm_object_t             object;
-       vm_object_t             new_object;
-       boolean_t               must_init;
-       vm_object_hash_entry_t  entry, new_entry;
-       uint32_t        try_failed_count = 0;
-       lck_mtx_t       *lck;
+       memory_object_control_t control;
 
-       if (pager == MEMORY_OBJECT_NULL)
-               return(vm_object_allocate(size));
+       assert(pager != MEMORY_OBJECT_NULL);
 
-       new_object = VM_OBJECT_NULL;
-       new_entry = VM_OBJECT_HASH_ENTRY_NULL;
-       must_init = init;
+       if (object != VM_OBJECT_NULL) {
+               assert(object->internal);
+               assert(object->pager_created);
+               assert(!object->pager_initialized);
+               assert(!object->pager_ready);
+               assert(object->pager_trusted);
+       } else {
+               object = vm_object_allocate(size);
+               assert(object != VM_OBJECT_NULL);
+               object->internal = FALSE;
+               object->pager_trusted = FALSE;
+               /* copy strategy invalid until set by memory manager */
+               object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
+       }
 
        /*
-        *      Look for an object associated with this port.
+        *      Allocate request port.
         */
-Retry:
-       lck = vm_object_hash_lock_spin(pager);
-       do {
-               entry = vm_object_hash_lookup(pager, FALSE);
 
-               if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
-                       if (new_object == VM_OBJECT_NULL) {
-                               /*
-                                *      We must unlock to create a new object;
-                                *      if we do so, we must try the lookup again.
-                                */
-                               vm_object_hash_unlock(lck);
-                               assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
-                               new_entry = vm_object_hash_entry_alloc(pager);
-                               new_object = vm_object_allocate(size);
-                               /*
-                                * Set new_object->hashed now, while noone
-                                * knows about this object yet and we
-                                * don't need to lock it.  Once it's in
-                                * the hash table, we would have to lock
-                                * the object to set its "hashed" bit and
-                                * we can't lock the object while holding
-                                * the hash lock as a spinlock...
-                                */
-                               new_object->hashed = TRUE;
-                               lck = vm_object_hash_lock_spin(pager);
-                       } else {
-                               /*
-                                *      Lookup failed twice, and we have something
-                                *      to insert; set the object.
-                                */
-                               /*
-                                * We can't lock the object here since we're
-                                * holding the hash lock as a spin lock.
-                                * We've already pre-set "new_object->hashed"
-                                * when we created "new_object" above, so we
-                                * won't need to modify the object in
-                                * vm_object_hash_insert().
-                                */
-                               assert(new_object->hashed);
-                               vm_object_hash_insert(new_entry, new_object);
-                               entry = new_entry;
-                               new_entry = VM_OBJECT_HASH_ENTRY_NULL;
-                               new_object = VM_OBJECT_NULL;
-                               must_init = TRUE;
-                       }
-               } else if (entry->object == VM_OBJECT_NULL) {
-                       /*
-                        *      If a previous object is being terminated,
-                        *      we must wait for the termination message
-                        *      to be queued (and lookup the entry again).
-                        */
-                       entry->waiting = TRUE;
-                       entry = VM_OBJECT_HASH_ENTRY_NULL;
-                       assert_wait((event_t) pager, THREAD_UNINT);
-                       vm_object_hash_unlock(lck);
+       control = memory_object_control_allocate(object);
+       assert(control != MEMORY_OBJECT_CONTROL_NULL);
 
-                       thread_block(THREAD_CONTINUE_NULL);
-                       lck = vm_object_hash_lock_spin(pager);
-               }
-       } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
+       vm_object_lock(object);
 
-       object = entry->object;
-       assert(object != VM_OBJECT_NULL);
+       assert(!object->pager_ready);
+       assert(!object->pager_initialized);
+       assert(object->pager == NULL);
+       assert(object->pager_control == NULL);
 
-       if (!must_init) {
-               if ( !vm_object_lock_try(object)) {
+       /*
+        *      Copy the reference we were given.
+        */
 
-                       vm_object_hash_unlock(lck);
+       memory_object_reference(pager);
+       object->pager_created = TRUE;
+       object->pager = pager;
+       object->pager_control = control;
+       object->pager_ready = FALSE;
 
-                       try_failed_count++;
-                       mutex_pause(try_failed_count);  /* wait a bit */
-                       goto Retry;
-               }
-               assert(!internal || object->internal);
-#if VM_OBJECT_CACHE
-               if (object->ref_count == 0) {
-                       if ( !vm_object_cache_lock_try()) {
+       vm_object_unlock(object);
 
-                               vm_object_hash_unlock(lck);
-                               vm_object_unlock(object);
+       /*
+        *      Let the pager know we're using it.
+        */
 
-                               try_failed_count++;
-                               mutex_pause(try_failed_count);  /* wait a bit */
-                               goto Retry;
-                       }
-                       XPR(XPR_VM_OBJECT_CACHE,
-                           "vm_object_enter: removing %x from cache, head (%x, %x)\n",
-                               object,
-                               vm_object_cached_list.next,
-                               vm_object_cached_list.prev, 0,0);
-                       queue_remove(&vm_object_cached_list, object,
-                                    vm_object_t, cached_list);
-                       vm_object_cached_count--;
+       (void) memory_object_init(pager,
+           object->pager_control,
+           PAGE_SIZE);
 
-                       vm_object_cache_unlock();
-               }
-#endif
-               if (named) {
-                       assert(!object->named);
-                       object->named = TRUE;
-               }
-               vm_object_lock_assert_exclusive(object);
-               object->ref_count++;
-               vm_object_res_reference(object);
+       vm_object_lock(object);
+       if (named) {
+               object->named = TRUE;
+       }
+       if (object->internal) {
+               object->pager_ready = TRUE;
+               vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+       }
 
-               vm_object_hash_unlock(lck);
-               vm_object_unlock(object);
+       object->pager_initialized = TRUE;
+       vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
 
-               VM_STAT_INCR(hits);
-       } else
-               vm_object_hash_unlock(lck);
+       vm_object_unlock(object);
 
-       assert(object->ref_count > 0);
-
-       VM_STAT_INCR(lookups);
-
-       XPR(XPR_VM_OBJECT,
-               "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
-               pager, object, must_init, 0, 0);
-
-       /*
-        *      If we raced to create a vm_object but lost, let's
-        *      throw away ours.
-        */
-
-       if (new_object != VM_OBJECT_NULL) {
-               /*
-                * Undo the pre-setting of "new_object->hashed" before
-                * deallocating "new_object", since we did not insert it
-                * into the hash table after all.
-                */
-               assert(new_object->hashed);
-               new_object->hashed = FALSE;
-               vm_object_deallocate(new_object);
-       }
-
-       if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
-               vm_object_hash_entry_free(new_entry);
-
-       if (must_init) {
-               memory_object_control_t control;
-
-               /*
-                *      Allocate request port.
-                */
-
-               control = memory_object_control_allocate(object);
-               assert (control != MEMORY_OBJECT_CONTROL_NULL);
-
-               vm_object_lock(object);
-               assert(object != kernel_object);
-
-               /*
-                *      Copy the reference we were given.
-                */
-
-               memory_object_reference(pager);
-               object->pager_created = TRUE;
-               object->pager = pager;
-               object->internal = internal;
-               object->pager_trusted = internal;
-               if (!internal) {
-                       /* copy strategy invalid until set by memory manager */
-                       object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
-               }
-               object->pager_control = control;
-               object->pager_ready = FALSE;
-
-               vm_object_unlock(object);
-
-               /*
-                *      Let the pager know we're using it.
-                */
-
-               (void) memory_object_init(pager,
-                       object->pager_control,
-                       PAGE_SIZE);
-
-               vm_object_lock(object);
-               if (named)
-                       object->named = TRUE;
-               if (internal) {
-                       vm_object_lock_assert_exclusive(object);
-                       object->pager_ready = TRUE;
-                       vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
-               }
-
-               object->pager_initialized = TRUE;
-               vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
-       } else {
-               vm_object_lock(object);
-       }
-
-       /*
-        *      [At this point, the object must be locked]
-        */
-
-       /*
-        *      Wait for the work above to be done by the first
-        *      thread to map this object.
-        */
-
-       while (!object->pager_initialized) {
-               vm_object_sleep(object,
-                               VM_OBJECT_EVENT_INITIALIZED,
-                               THREAD_UNINT);
-       }
-       vm_object_unlock(object);
-
-       XPR(XPR_VM_OBJECT,
-           "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
-           object, object->pager, internal, 0,0);
-       return(object);
-}
+       return object;
+}
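
[Annotation] A toy state model of the flag ordering the new routine establishes: pager_created is set before the pager learns about the object, pager_ready only for internal objects, pager_initialized last. All types here are hypothetical; memory_object_init() and the sleep/wakeup on VM_OBJECT_EVENT_PAGER_READY are reduced to comments.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	bool internal;
	bool pager_created;
	bool pager_ready;
	bool pager_initialized;
};

static void
associate(struct obj *o)
{
	o->pager_created = true;      /* pager now exists */
	/* ... memory_object_init() runs here, object unlocked ... */
	if (o->internal) {
		/* Internal pagers are ready at once; external ones
		 * signal readiness later via the memory manager. */
		o->pager_ready = true;
	}
	o->pager_initialized = true;  /* wakes threads waiting to map */
}

int
main(void)
{
	struct obj o = { .internal = true };

	associate(&o);
	assert(o.pager_created && o.pager_ready && o.pager_initialized);
	printf("ready=%d initialized=%d\n",
	    (int)o.pager_ready, (int)o.pager_initialized);
	return 0;
}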
 
 /*
  *     Routine:        vm_object_compressor_pager_create
@@ -4715,12 +4066,10 @@ Retry:
 
 void
 vm_object_compressor_pager_create(
-       vm_object_t     object)
+       vm_object_t     object)
 {
-       memory_object_t         pager;
-       vm_object_hash_entry_t  entry;
-       lck_mtx_t               *lck;
-       vm_object_t             pager_object = VM_OBJECT_NULL;
+       memory_object_t         pager;
+       vm_object_t             pager_object = VM_OBJECT_NULL;
 
        assert(object != kernel_object);
 
@@ -4736,71 +4085,68 @@ vm_object_compressor_pager_create(
                 */
                while (!object->pager_initialized) {
                        vm_object_sleep(object,
-                                       VM_OBJECT_EVENT_INITIALIZED,
-                                       THREAD_UNINT);
+                           VM_OBJECT_EVENT_INITIALIZED,
+                           THREAD_UNINT);
                }
                vm_object_paging_end(object);
                return;
        }
 
+       if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
+           (object->vo_size / PAGE_SIZE)) {
+#if DEVELOPMENT || DEBUG
+               printf("vm_object_compressor_pager_create(%p): "
+                   "object size 0x%llx >= 0x%llx\n",
+                   object,
+                   (uint64_t) object->vo_size,
+                   0x0FFFFFFFFULL * PAGE_SIZE);
+#endif /* DEVELOPMENT || DEBUG */
+               vm_object_paging_end(object);
+               return;
+       }
+
        /*
         *      Indicate that a memory object has been assigned
         *      before dropping the lock, to prevent a race.
         */
 
        object->pager_created = TRUE;
+       object->pager_trusted = TRUE;
        object->paging_offset = 0;
-               
-       vm_object_unlock(object);
 
-       if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
-           (object->vo_size/PAGE_SIZE)) {
-               panic("vm_object_compressor_pager_create(%p): "
-                     "object size 0x%llx >= 0x%llx\n",
-                     object,
-                     (uint64_t) object->vo_size,
-                     0x0FFFFFFFFULL*PAGE_SIZE);
-       }
+       vm_object_unlock(object);
 
        /*
         *      Create the [internal] pager, and associate it with this object.
         *
         *      We make the association here so that vm_object_enter()
-        *      can look up the object to complete initializing it.  No
+        *      can look up the object to complete initializing it.  No
         *      user will ever map this object.
         */
        {
-               assert(object->temporary);
-
                /* create our new memory object */
-               assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
-                      (object->vo_size/PAGE_SIZE));
+               assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
+                   (object->vo_size / PAGE_SIZE));
                (void) compressor_memory_object_create(
                        (memory_object_size_t) object->vo_size,
                        &pager);
                if (pager == NULL) {
                        panic("vm_object_compressor_pager_create(): "
-                             "no pager for object %p size 0x%llx\n",
-                             object, (uint64_t) object->vo_size);
+                           "no pager for object %p size 0x%llx\n",
+                           object, (uint64_t) object->vo_size);
                }
-       }
-
-       entry = vm_object_hash_entry_alloc(pager);
-
-       vm_object_lock(object);
-       lck = vm_object_hash_lock_spin(pager);
-       vm_object_hash_insert(entry, object);
-       vm_object_hash_unlock(lck);
-       vm_object_unlock(object);
+       }
 
        /*
         *      A reference was returned by
         *      memory_object_create(), and it is
-        *      copied by vm_object_enter().
+        *      copied by vm_object_memory_object_associate().
         */
 
-       pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE);
-
+       pager_object = vm_object_memory_object_associate(pager,
+           object,
+           object->vo_size,
+           FALSE);
        if (pager_object != object) {
                panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
        }
@@ -4818,46 +4164,20 @@ vm_object_compressor_pager_create(
        vm_object_paging_end(object);
 }
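
[Annotation] The overflow guard added near the top of this function rejects objects whose page count does not fit in 32 bits, which the 0x0FFFFFFFFULL * PAGE_SIZE limit in the message implies. The check's idiom, truncate-and-compare, in a standalone form assuming a 4 KB page:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static int
page_count_overflows(uint64_t vo_size)
{
	/* Same idiom as above: truncate to 32 bits and compare. */
	return (uint32_t)(vo_size / PAGE_SIZE) != (vo_size / PAGE_SIZE);
}

int
main(void)
{
	uint64_t small = 1ULL << 30;                  /* 1 GB */
	uint64_t huge  = 0x100000000ULL * PAGE_SIZE;  /* 2^32 pages */

	printf("small: %d\n", page_count_overflows(small)); /* 0 */
	printf("huge:  %d\n", page_count_overflows(huge));  /* 1 */
	return 0;
}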
 
-/*
- *     Routine:        vm_object_remove
- *     Purpose:
- *             Eliminate the pager/object association
- *             for this pager.
- *     Conditions:
- *             The object cache must be locked.
- */
-__private_extern__ void
-vm_object_remove(
-       vm_object_t     object)
-{
-       memory_object_t pager;
-
-       if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
-               vm_object_hash_entry_t  entry;
-
-               entry = vm_object_hash_lookup(pager, FALSE);
-               if (entry != VM_OBJECT_HASH_ENTRY_NULL)
-                       entry->object = VM_OBJECT_NULL;
-       }
-
-}
-
 /*
  *     Global variables for vm_object_collapse():
  *
  *             Counts for normal collapses and bypasses.
  *             Debugging variables, to watch or disable collapse.
  */
-static long    object_collapses = 0;
-static long    object_bypasses  = 0;
-
-static boolean_t       vm_object_collapse_allowed = TRUE;
-static boolean_t       vm_object_bypass_allowed = TRUE;
+static long     object_collapses = 0;
+static long     object_bypasses  = 0;
 
-unsigned long vm_object_collapse_encrypted = 0;
+static boolean_t        vm_object_collapse_allowed = TRUE;
+static boolean_t        vm_object_bypass_allowed = TRUE;
 
 void vm_object_do_collapse_compressor(vm_object_t object,
-                                     vm_object_t backing_object);
+    vm_object_t backing_object);
 void
 vm_object_do_collapse_compressor(
        vm_object_t object,
@@ -4879,13 +4199,13 @@ vm_object_do_collapse_compressor(
         */
 
        for (backing_offset = object->vo_shadow_offset;
-            backing_offset < object->vo_shadow_offset + object->vo_size;
-            backing_offset += PAGE_SIZE) {
+           backing_offset < object->vo_shadow_offset + object->vo_size;
+           backing_offset += PAGE_SIZE) {
                memory_object_offset_t backing_pager_offset;
 
                /* find the next compressed page at or after this offset */
                backing_pager_offset = (backing_offset +
-                                       backing_object->paging_offset);
+                   backing_object->paging_offset);
                backing_pager_offset = vm_compressor_pager_next_compressed(
                        backing_object->pager,
                        backing_pager_offset);
@@ -4894,7 +4214,7 @@ vm_object_do_collapse_compressor(
                        break;
                }
                backing_offset = (backing_pager_offset -
-                                 backing_object->paging_offset);
+                   backing_object->paging_offset);
 
                new_offset = backing_offset - object->vo_shadow_offset;
 
@@ -4905,9 +4225,9 @@ vm_object_do_collapse_compressor(
 
                if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
                    (vm_compressor_pager_state_get(object->pager,
-                                                  (new_offset +
-                                                   object->paging_offset)) ==
-                    VM_EXTERNAL_STATE_EXISTS)) {
+                   (new_offset +
+                   object->paging_offset)) ==
+                   VM_EXTERNAL_STATE_EXISTS)) {
                        /*
                         * This page already exists in object, resident or
                         * compressed.
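
[Annotation] For the slot walk at the top of vm_object_do_collapse_compressor(): scan the backing pager for the next compressed slot at or after each offset, then translate pager offsets back into parent-object offsets via paging_offset and vo_shadow_offset. In this sketch, next_compressed() is a stub standing in for vm_compressor_pager_next_compressed(), and all slot positions and offsets are invented.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define NO_SLOT   ((uint64_t)-1)

/* Stub: pretend compressed slots exist at pager offsets 0x2000, 0x5000. */
static uint64_t
next_compressed(uint64_t pager_offset)
{
	static const uint64_t slots[] = { 0x2000, 0x5000 };
	unsigned i;

	for (i = 0; i < 2; i++) {
		if (slots[i] >= pager_offset) {
			return slots[i];
		}
	}
	return NO_SLOT;
}

int
main(void)
{
	uint64_t shadow_offset = 0x1000; /* object's vo_shadow_offset */
	uint64_t paging_offset = 0x1000; /* backing's paging_offset   */
	uint64_t size          = 0x8000; /* object's vo_size          */
	uint64_t backing_off, pager_off;

	for (backing_off = shadow_offset;
	    backing_off < shadow_offset + size;
	    backing_off += PAGE_SIZE) {
		pager_off = next_compressed(backing_off + paging_offset);
		if (pager_off == NO_SLOT) {
			break;  /* no compressed pages left */
		}
		backing_off = pager_off - paging_offset;
		printf("slot moves to parent offset 0x%llx\n",
		    (unsigned long long)(backing_off - shadow_offset));
	}
	return 0;
}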
@@ -4968,14 +4288,13 @@ vm_object_do_collapse(
         *      will be overwritten by any of the parent's
         *      pages that shadow them.
         */
-       
+
        while (!vm_page_queue_empty(&backing_object->memq)) {
-               
                p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
-               
-               new_offset = (p->offset - backing_offset);
-               
-               assert(!p->busy || p->absent);
+
+               new_offset = (p->vmp_offset - backing_offset);
+
+               assert(!p->vmp_busy || p->vmp_absent);
 
                /*
                 *      If the parent has a page here, or if
@@ -4984,27 +4303,14 @@ vm_object_do_collapse(
                 *
                 *      Otherwise, move it as planned.
                 */
-               
-               if (p->offset < backing_offset || new_offset >= size) {
+
+               if (p->vmp_offset < backing_offset || new_offset >= size) {
                        VM_PAGE_FREE(p);
                } else {
-                       /*
-                        * ENCRYPTED SWAP:
-                        * The encryption key includes the "pager" and the
-                        * "paging_offset".  These will not change during the 
-                        * object collapse, so we can just move an encrypted
-                        * page from one object to the other in this case.
-                        * We can't decrypt the page here, since we can't drop
-                        * the object lock.
-                        */
-                       if (p->encrypted) {
-                               vm_object_collapse_encrypted++;
-                       }
                        pp = vm_page_lookup(object, new_offset);
                        if (pp == VM_PAGE_NULL) {
-
                                if (VM_COMPRESSOR_PAGER_STATE_GET(object,
-                                                                 new_offset)
+                                   new_offset)
                                    == VM_EXTERNAL_STATE_EXISTS) {
                                        /*
                                         * Parent object has this page
@@ -5017,13 +4323,12 @@ vm_object_do_collapse(
                                        /*
                                         *      Parent now has no page.
                                         *      Move the backing object's page
-                                        *      up.
+                                        *      up.
                                         */
-                                       vm_page_rename(p, object, new_offset,
-                                                      TRUE);
+                                       vm_page_rename(p, object, new_offset);
                                }
                        } else {
-                               assert(! pp->absent);
+                               assert(!pp->vmp_absent);
 
                                /*
                                 *      Parent object has a real page.
@@ -5038,17 +4343,13 @@ vm_object_do_collapse(
        if (vm_object_collapse_compressor_allowed &&
            object->pager != MEMORY_OBJECT_NULL &&
            backing_object->pager != MEMORY_OBJECT_NULL) {
-
                /* move compressed pages from backing_object to object */
                vm_object_do_collapse_compressor(object, backing_object);
-
        } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
-               vm_object_hash_entry_t  entry;
-
                assert((!object->pager_created &&
-                       (object->pager == MEMORY_OBJECT_NULL)) ||
-                      (!backing_object->pager_created &&
-                       (backing_object->pager == MEMORY_OBJECT_NULL)));
+                   (object->pager == MEMORY_OBJECT_NULL)) ||
+                   (!backing_object->pager_created &&
+                   (backing_object->pager == MEMORY_OBJECT_NULL)));
                /*
                 *      Move the pager from backing_object to object.
                 *
@@ -5063,17 +4364,6 @@ vm_object_do_collapse(
                assert(object->pager == NULL);
                object->pager = backing_object->pager;
 
-               if (backing_object->hashed) {
-                       lck_mtx_t       *lck;
-
-                       lck = vm_object_hash_lock_spin(backing_object->pager);
-                       entry = vm_object_hash_lookup(object->pager, FALSE);
-                       assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
-                       entry->object = object;
-                       vm_object_hash_unlock(lck);
-
-                       object->hashed = TRUE;
-               }
                object->pager_created = backing_object->pager_created;
                object->pager_control = backing_object->pager_control;
                object->pager_ready = backing_object->pager_ready;
@@ -5082,7 +4372,7 @@ vm_object_do_collapse(
                    backing_object->paging_offset + backing_offset;
                if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
                        memory_object_control_collapse(object->pager_control,
-                                                      object);
+                           object);
                }
                /* the backing_object has lost its pager: reset all fields */
                backing_object->pager_created = FALSE;
@@ -5096,7 +4386,7 @@ vm_object_do_collapse(
         *      Note that the reference to backing_object->shadow
         *      moves from within backing_object to within object.
         */
-       
+
        assert(!object->phys_contiguous);
        assert(!backing_object->phys_contiguous);
        object->shadow = backing_object->shadow;
@@ -5110,7 +4400,7 @@ vm_object_do_collapse(
                object->vo_shadow_offset = 0;
        }
        assert((object->shadow == VM_OBJECT_NULL) ||
-              (object->shadow->copy != backing_object));
+           (object->shadow->copy != backing_object));
 
        /*
         *      Discard backing_object.
@@ -5120,7 +4410,7 @@ vm_object_do_collapse(
         *      all that is necessary is to dispose of it.
         */
        object_collapses++;
-       
+
        assert(backing_object->ref_count == 1);
        assert(backing_object->resident_page_count == 0);
        assert(backing_object->paging_in_progress == 0);
@@ -5142,20 +4432,16 @@ vm_object_do_collapse(
        backing_object->alive = FALSE;
        vm_object_unlock(backing_object);
 
-       XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
-               backing_object, 0,0,0,0);
-
 #if VM_OBJECT_TRACKING
        if (vm_object_tracking_inited) {
                btlog_remove_entries_for_element(vm_object_tracking_btlog,
-                                                backing_object);
+                   backing_object);
        }
 #endif /* VM_OBJECT_TRACKING */
 
        vm_object_lock_destroy(backing_object);
 
        zfree(vm_object_zone, backing_object);
-       
 }
 
 static void
@@ -5167,13 +4453,13 @@ vm_object_do_bypass(
         *      Make the parent shadow the next object
         *      in the chain.
         */
-       
+
        vm_object_lock_assert_exclusive(object);
        vm_object_lock_assert_exclusive(backing_object);
 
-#if    TASK_SWAPPER
+#if     TASK_SWAPPER
        /*
-        *      Do object reference in-line to 
+        *      Do object reference in-line to
         *      conditionally increment shadow's
         *      residence count.  If object is not
         *      resident, leave residence count
@@ -5183,13 +4469,14 @@ vm_object_do_bypass(
                vm_object_lock(backing_object->shadow);
                vm_object_lock_assert_exclusive(backing_object->shadow);
                backing_object->shadow->ref_count++;
-               if (object->res_count != 0)
+               if (object->res_count != 0) {
                        vm_object_res_reference(backing_object->shadow);
+               }
                vm_object_unlock(backing_object->shadow);
        }
-#else  /* TASK_SWAPPER */
+#else   /* TASK_SWAPPER */
        vm_object_reference(backing_object->shadow);
-#endif /* TASK_SWAPPER */
+#endif  /* TASK_SWAPPER */
 
        assert(!object->phys_contiguous);
        assert(!backing_object->phys_contiguous);
@@ -5200,30 +4487,30 @@ vm_object_do_bypass(
                /* no shadow, therefore no shadow offset... */
                object->vo_shadow_offset = 0;
        }
-       
+
        /*
         *      Backing object might have had a copy pointer
-        *      to us.  If it did, clear it. 
+        *      to us.  If it did, clear it.
         */
        if (backing_object->copy == object) {
                backing_object->copy = VM_OBJECT_NULL;
        }
-       
+
        /*
         *      Drop the reference count on backing_object.
-#if    TASK_SWAPPER
+        #if    TASK_SWAPPER
         *      Since its ref_count was at least 2, it
         *      will not vanish; so we don't need to call
         *      vm_object_deallocate.
         *      [with a caveat for "named" objects]
-        * 
+        *
         *      The res_count on the backing object is
         *      conditionally decremented.  It's possible
         *      (via vm_pageout_scan) to get here with
         *      a "swapped" object, which has a 0 res_count,
         *      in which case, the backing object res_count
         *      is already down by one.
-#else
+        #else
         *      Don't call vm_object_deallocate unless
         *      ref_count drops to zero.
         *
@@ -5231,31 +4518,31 @@ vm_object_do_bypass(
         *      backing object could be bypassed but not
         *      collapsed, such as when the backing object
         *      is temporary and cachable.
-#endif
+        #endif
         */
        if (backing_object->ref_count > 2 ||
            (!backing_object->named && backing_object->ref_count > 1)) {
                vm_object_lock_assert_exclusive(backing_object);
                backing_object->ref_count--;
-#if    TASK_SWAPPER
-               if (object->res_count != 0)
+#if     TASK_SWAPPER
+               if (object->res_count != 0) {
                        vm_object_res_deallocate(backing_object);
+               }
                assert(backing_object->ref_count > 0);
-#endif /* TASK_SWAPPER */
+#endif  /* TASK_SWAPPER */
                vm_object_unlock(backing_object);
        } else {
-
                /*
                 *      Drop locks so that we can deallocate
                 *      the backing object.
                 */
 
-#if    TASK_SWAPPER
+#if     TASK_SWAPPER
                if (object->res_count == 0) {
                        /* XXX get a reference for the deallocate below */
                        vm_object_res_reference(backing_object);
                }
-#endif /* TASK_SWAPPER */
+#endif  /* TASK_SWAPPER */
                /*
                 * vm_object_collapse (the caller of this function) is
                 * now called from contexts that may not guarantee that a
@@ -5282,11 +4569,11 @@ vm_object_do_bypass(
                vm_object_lock(object);
                vm_object_activity_end(object);
        }
-       
+
        object_bypasses++;
 }
 
-               
+
 /*
  *     vm_object_collapse:
  *
@@ -5304,29 +4591,27 @@ static unsigned long vm_object_collapse_do_bypass = 0;
 
 __private_extern__ void
 vm_object_collapse(
-       vm_object_t                             object,
-       vm_object_offset_t                      hint_offset,
-       boolean_t                               can_bypass)
+       vm_object_t                             object,
+       vm_object_offset_t                      hint_offset,
+       boolean_t                               can_bypass)
 {
-       vm_object_t                             backing_object;
-       unsigned int                            rcount;
-       unsigned int                            size;
-       vm_object_t                             original_object;
-       int                                     object_lock_type;
-       int                                     backing_object_lock_type;
+       vm_object_t                             backing_object;
+       unsigned int                            rcount;
+       unsigned int                            size;
+       vm_object_t                             original_object;
+       int                                     object_lock_type;
+       int                                     backing_object_lock_type;
 
        vm_object_collapse_calls++;
 
-       if (! vm_object_collapse_allowed &&
-           ! (can_bypass && vm_object_bypass_allowed)) {
+       if (!vm_object_collapse_allowed &&
+           !(can_bypass && vm_object_bypass_allowed)) {
                return;
        }
 
-       XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n", 
-               object, 0,0,0,0);
-
-       if (object == VM_OBJECT_NULL)
+       if (object == VM_OBJECT_NULL) {
                return;
+       }
 
        original_object = object;
 
@@ -5353,7 +4638,7 @@ retry:
                /*
                 *      There is a backing object, and
                 */
-       
+
                backing_object = object->shadow;
                if (backing_object == VM_OBJECT_NULL) {
                        if (object != original_object) {
@@ -5390,7 +4675,7 @@ retry:
                 *              The backing object is internal.
                 *
                 */
-       
+
                if (!backing_object->internal ||
                    backing_object->paging_in_progress != 0 ||
                    backing_object->activity_in_progress != 0) {
@@ -5414,9 +4699,9 @@ retry:
                if (object->purgable != VM_PURGABLE_DENY ||
                    backing_object->purgable != VM_PURGABLE_DENY) {
                        panic("vm_object_collapse() attempting to collapse "
-                             "purgeable object: %p(%d) %p(%d)\n",
-                             object, object->purgable,
-                             backing_object, backing_object->purgable);
+                           "purgeable object: %p(%d) %p(%d)\n",
+                           object, object->purgable,
+                           backing_object, backing_object->purgable);
                        /* try and collapse the rest of the shadow chain */
                        if (object != original_object) {
                                vm_object_unlock(object);
@@ -5425,7 +4710,7 @@ retry:
                        object_lock_type = backing_object_lock_type;
                        continue;
                }
-       
+
                /*
                 *      The backing object can't be a copy-object:
                 *      the shadow_offset for the copy-object must stay
@@ -5462,20 +4747,19 @@ retry:
                 */
                if (backing_object->ref_count == 1 &&
                    (vm_object_collapse_compressor_allowed ||
-                    !object->pager_created 
-                    || (!backing_object->pager_created)
+                   !object->pager_created
+                   || (!backing_object->pager_created)
                    ) && vm_object_collapse_allowed) {
-
                        /*
                         * We need the exclusive lock on the VM objects.
                         */
                        if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
                                /*
-                                * We have an object and its shadow locked 
+                                * We have an object and its shadow locked
                                 * "shared".  We can't just upgrade the locks
                                 * to "exclusive", as some other thread might
                                 * also have these objects locked "shared" and
-                                * attempt to upgrade one or the other to 
+                                * attempt to upgrade one or the other to
                                 * "exclusive".  The upgrades would block
                                 * forever waiting for the other "shared" locks
                                 * to get released.
@@ -5484,19 +4768,14 @@ retry:
                                 * have changed) with "exclusive" locking.
                                 */
                                vm_object_unlock(backing_object);
-                               if (object != original_object)
+                               if (object != original_object) {
                                        vm_object_unlock(object);
+                               }
                                object_lock_type = OBJECT_LOCK_EXCLUSIVE;
                                backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
                                goto retry;
                        }
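
[Annotation] The comment above describes a classic shared-to-exclusive upgrade hazard. The same shape, sketched with POSIX rwlocks; the kernel uses its own object locks, and still_collapsible() is a stub for the revalidation the retry label performs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool
still_collapsible(void)
{
	return true;    /* stub: revalidate object/backing state here */
}

static void
collapse_with_upgrade(void)
{
	pthread_rwlock_rdlock(&obj_lock);
	if (!still_collapsible()) {
		pthread_rwlock_unlock(&obj_lock);
		return;
	}
	/*
	 * Need exclusive access.  Upgrading in place can deadlock if
	 * another shared holder tries the same, so drop the shared
	 * lock and reacquire exclusive, then revalidate.
	 */
	pthread_rwlock_unlock(&obj_lock);
	pthread_rwlock_wrlock(&obj_lock);
	if (still_collapsible()) {      /* state may have changed */
		printf("collapsed under exclusive lock\n");
	}
	pthread_rwlock_unlock(&obj_lock);
}

int
main(void)
{
	collapse_with_upgrade();
	return 0;
}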
 
-                       XPR(XPR_VM_OBJECT, 
-                  "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
-                               backing_object, object,
-                               backing_object->pager, 
-                               backing_object->pager_control, 0);
-
                        /*
                         *      Collapse the object with its backing
                         *      object, and try again with the object's
@@ -5513,7 +4792,7 @@ retry:
                 *      or permitted, so let's try bypassing it.
                 */
 
-               if (! (can_bypass && vm_object_bypass_allowed)) {
+               if (!(can_bypass && vm_object_bypass_allowed)) {
                        /* try and collapse the rest of the shadow chain */
                        if (object != original_object) {
                                vm_object_unlock(object);
@@ -5533,9 +4812,9 @@ retry:
                rcount = object->resident_page_count;
 
                if (rcount != size) {
-                       vm_object_offset_t      offset;
-                       vm_object_offset_t      backing_offset;
-                       unsigned int            backing_rcount;
+                       vm_object_offset_t      offset;
+                       vm_object_offset_t      backing_offset;
+                       unsigned int            backing_rcount;
 
                        /*
                         *      If the backing object has a pager but no pagemap,
@@ -5570,8 +4849,8 @@ retry:
                        backing_offset = object->vo_shadow_offset;
                        backing_rcount = backing_object->resident_page_count;
 
-                       if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
-                                /*
+                       if ((int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
+                               /*
                                 * we have enough pages in the backing object to guarantee that
                                 * at least 1 of them must be 'uncovered' by a resident page
                                 * in the object we're evaluating, so move on and
@@ -5598,23 +4877,24 @@ retry:
                         *
                         */
 
-#define EXISTS_IN_OBJECT(obj, off, rc)                 \
-       ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))   \
-         == VM_EXTERNAL_STATE_EXISTS) ||               \
+#define EXISTS_IN_OBJECT(obj, off, rc)                  \
+       ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))   \
+         == VM_EXTERNAL_STATE_EXISTS) ||               \
         ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
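
[Annotation] A userspace analogue of EXISTS_IN_OBJECT(), for reading the scans below: a page "exists" if the compressor holds it or if it is resident, and each resident hit decrements the remaining-resident counter so a scan can stop once every resident page is accounted for. The lookup functions are stubs with invented offsets.

#include <stdio.h>

static int compressor_has(unsigned long off) { return off == 0x2000; }
static int resident_has(unsigned long off)   { return off == 0x1000; }

#define EXISTS(off, rc) \
	(compressor_has(off) || ((rc) && resident_has(off) && (rc)--))

int
main(void)
{
	unsigned rc = 1;        /* resident pages left to account for */
	int e;

	e = EXISTS(0x1000, rc); /* resident hit: consumes rc */
	printf("0x1000: %d, rc now %u\n", e, rc);   /* 1, 0 */
	e = EXISTS(0x2000, rc); /* compressor hit: rc untouched */
	printf("0x2000: %d\n", e);                  /* 1 */
	e = EXISTS(0x3000, rc); /* rc exhausted: lookup skipped */
	printf("0x3000: %d\n", e);                  /* 0 */
	return 0;
}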
 
                        /*
                         * Check the hint location first
                         * (since it is often the quickest way out of here).
                         */
-                       if (object->cow_hint != ~(vm_offset_t)0)
+                       if (object->cow_hint != ~(vm_offset_t)0) {
                                hint_offset = (vm_object_offset_t)object->cow_hint;
-                       else
+                       } else {
                                hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
-                                             (hint_offset - 8 * PAGE_SIZE_64) : 0;
+                                   (hint_offset - 8 * PAGE_SIZE_64) : 0;
+                       }
 
                        if (EXISTS_IN_OBJECT(backing_object, hint_offset +
-                                            backing_offset, backing_rcount) &&
+                           backing_offset, backing_rcount) &&
                            !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
                                /* dependency right at the hint */
                                object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
@@ -5634,7 +4914,7 @@ retry:
                         * walk the backing_object's resident pages first.
                         *
                         * NOTE: Pages may be in both the existence map and/or
-                         * resident, so if we don't find a dependency while
+                        * resident, so if we don't find a dependency while
                         * walking the backing object's resident page list
                         * directly, and there is an existence map, we'll have
                         * to run the offset based 2nd pass.  Because we may
@@ -5648,20 +4928,19 @@ retry:
                                backing_rcount = backing_object->resident_page_count;
                                p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
                                do {
-                                       offset = (p->offset - backing_offset);
+                                       offset = (p->vmp_offset - backing_offset);
 
                                        if (offset < object->vo_size &&
                                            offset != hint_offset &&
                                            !EXISTS_IN_OBJECT(object, offset, rc)) {
                                                /* found a dependency */
                                                object->cow_hint = (vm_offset_t) offset; /* atomic */
-                                               
+
                                                break;
                                        }
-                                       p = (vm_page_t) vm_page_queue_next(&p->listq);
-
+                                       p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
                                } while (--backing_rcount);
-                               if (backing_rcount != 0 ) {
+                               if (backing_rcount != 0) {
                                        /* try and collapse the rest of the shadow chain */
                                        if (object != original_object) {
                                                vm_object_unlock(object);
@@ -5678,13 +4957,12 @@ retry:
                         */
                        if (backing_rcount) {
                                offset = hint_offset;
-                               
-                               while((offset =
-                                     (offset + PAGE_SIZE_64 < object->vo_size) ?
-                                     (offset + PAGE_SIZE_64) : 0) != hint_offset) {
 
+                               while ((offset =
+                                   (offset + PAGE_SIZE_64 < object->vo_size) ?
+                                   (offset + PAGE_SIZE_64) : 0) != hint_offset) {
                                        if (EXISTS_IN_OBJECT(backing_object, offset +
-                                           backing_offset, backing_rcount) &&
+                                           backing_offset, backing_rcount) &&
                                            !EXISTS_IN_OBJECT(object, offset, rcount)) {
                                                /* found a dependency */
                                                object->cow_hint = (vm_offset_t) offset; /* atomic */
@@ -5708,8 +4986,9 @@ retry:
                 */
                if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
                        vm_object_unlock(backing_object);
-                       if (object != original_object)
+                       if (object != original_object) {
                                vm_object_unlock(object);
+                       }
                        object_lock_type = OBJECT_LOCK_EXCLUSIVE;
                        backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
                        goto retry;
@@ -5736,10 +5015,10 @@ retry:
 
        /* NOT REACHED */
        /*
-       if (object != original_object) {
-               vm_object_unlock(object);
-       }
-       */
+        *  if (object != original_object) {
+        *       vm_object_unlock(object);
+        *  }
+        */
 }
 
 /*
@@ -5758,11 +5037,11 @@ unsigned int vm_object_page_remove_iterate = 0;
 
 __private_extern__ void
 vm_object_page_remove(
-       vm_object_t             object,
-       vm_object_offset_t      start,
-       vm_object_offset_t      end)
+       vm_object_t             object,
+       vm_object_offset_t      start,
+       vm_object_offset_t      end)
 {
-       vm_page_t       p, next;
+       vm_page_t       p, next;
 
        /*
         *      One and two page removals are most popular.
@@ -5770,15 +5049,16 @@ vm_object_page_remove(
         *      It balances vm_object_lookup vs iteration.
         */
 
-       if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
+       if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
                vm_object_page_remove_lookup++;
 
                for (; start < end; start += PAGE_SIZE_64) {
                        p = vm_page_lookup(object, start);
                        if (p != VM_PAGE_NULL) {
-                               assert(!p->cleaning && !p->laundry);
-                               if (!p->fictitious && p->pmapped)
-                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
+                               assert(!p->vmp_cleaning && !p->vmp_laundry);
+                               if (!p->vmp_fictitious && p->vmp_pmapped) {
+                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
+                               }
                                VM_PAGE_FREE(p);
                        }
                }
@@ -5787,11 +5067,12 @@ vm_object_page_remove(
 
                p = (vm_page_t) vm_page_queue_first(&object->memq);
                while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
-                       next = (vm_page_t) vm_page_queue_next(&p->listq);
-                       if ((start <= p->offset) && (p->offset < end)) {
-                               assert(!p->cleaning && !p->laundry);
-                               if (!p->fictitious && p->pmapped)
-                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
+                       next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
+                       if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
+                               assert(!p->vmp_cleaning && !p->vmp_laundry);
+                               if (!p->vmp_fictitious && p->vmp_pmapped) {
+                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
+                               }
                                VM_PAGE_FREE(p);
                        }
                        p = next;
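
[Annotation] The threshold in the first branch (pages to remove versus resident_page_count / 16) is the whole strategy choice in vm_object_page_remove(); as a standalone sketch with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static const char *
removal_strategy(uint64_t start, uint64_t end, unsigned resident_pages)
{
	uint64_t pages_to_remove = (end - start) / PAGE_SIZE;

	/* Small ranges: a hash lookup per page beats walking every
	 * resident page in the object. */
	if (pages_to_remove < resident_pages / 16) {
		return "lookup";
	}
	return "iterate";
}

int
main(void)
{
	printf("%s\n", removal_strategy(0, 2 * PAGE_SIZE, 1000));   /* lookup  */
	printf("%s\n", removal_strategy(0, 512 * PAGE_SIZE, 1000)); /* iterate */
	return 0;
}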
@@ -5827,31 +5108,27 @@ static int vm_object_coalesce_count = 0;
 
 __private_extern__ boolean_t
 vm_object_coalesce(
-       vm_object_t                     prev_object,
-       vm_object_t                     next_object,
-       vm_object_offset_t              prev_offset,
+       vm_object_t                     prev_object,
+       vm_object_t                     next_object,
+       vm_object_offset_t              prev_offset,
        __unused vm_object_offset_t next_offset,
-       vm_object_size_t                prev_size,
-       vm_object_size_t                next_size)
+       vm_object_size_t                prev_size,
+       vm_object_size_t                next_size)
 {
-       vm_object_size_t        newsize;
+       vm_object_size_t        newsize;
 
-#ifdef lint
+#ifdef  lint
        next_offset++;
-#endif /* lint */
+#endif  /* lint */
 
        if (next_object != VM_OBJECT_NULL) {
-               return(FALSE);
+               return FALSE;
        }
 
        if (prev_object == VM_OBJECT_NULL) {
-               return(TRUE);
+               return TRUE;
        }
 
-       XPR(XPR_VM_OBJECT,
-       "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
-               prev_object, prev_offset, prev_size, next_size, 0);
-
        vm_object_lock(prev_object);
 
        /*
@@ -5879,7 +5156,7 @@ vm_object_coalesce(
            (prev_object->paging_in_progress != 0) ||
            (prev_object->activity_in_progress != 0)) {
                vm_object_unlock(prev_object);
-               return(FALSE);
+               return FALSE;
        }
 
        vm_object_coalesce_count++;
@@ -5889,8 +5166,8 @@ vm_object_coalesce(
         *      a previous deallocation.
         */
        vm_object_page_remove(prev_object,
-               prev_offset + prev_size,
-               prev_offset + prev_size + next_size);
+           prev_offset + prev_size,
+           prev_offset + prev_size + next_size);
 
        /*
         *      Extend the object if necessary.
@@ -5901,29 +5178,30 @@ vm_object_coalesce(
        }
 
        vm_object_unlock(prev_object);
-       return(TRUE);
+       return TRUE;
 }
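
[Annotation] A toy version of the coalesce fast path above: with no next object, and a previous object that is pagerless, unshadowed, and referenced only once, the previous object simply grows to absorb the next range. The fields and precondition list here are abbreviated; the full set of checks is in the code above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct obj {
	uint64_t vo_size;
	int      ref_count;
	bool     has_pager;
	bool     shadowed;
};

static bool
coalesce(struct obj *prev, uint64_t prev_offset,
    uint64_t prev_size, uint64_t next_size)
{
	uint64_t newsize;

	if (prev->has_pager || prev->shadowed || prev->ref_count > 1) {
		return false;   /* cannot safely extend */
	}
	/* Grow the object so the next range falls inside it. */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev->vo_size) {
		prev->vo_size = newsize;
	}
	return true;
}

int
main(void)
{
	struct obj prev = { .vo_size = 0x4000, .ref_count = 1 };

	if (coalesce(&prev, 0, 0x4000, 0x2000)) {
		printf("extended to 0x%llx\n",
		    (unsigned long long)prev.vo_size);  /* 0x6000 */
	}
	return 0;
}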
 
 kern_return_t
 vm_object_populate_with_private(
-               vm_object_t             object,
-               vm_object_offset_t      offset,
-               ppnum_t                 phys_page,
-               vm_size_t               size)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       ppnum_t                 phys_page,
+       vm_size_t               size)
 {
-       ppnum_t                 base_page;
-       vm_object_offset_t      base_offset;
+       ppnum_t                 base_page;
+       vm_object_offset_t      base_offset;
 
 
-       if (!object->private)
+       if (!object->private) {
                return KERN_FAILURE;
+       }
 
        base_page = phys_page;
 
        vm_object_lock(object);
 
        if (!object->phys_contiguous) {
-               vm_page_t       m;
+               vm_page_t       m;
 
                if ((base_offset = trunc_page_64(offset)) != offset) {
                        vm_object_unlock(object);
@@ -5935,66 +5213,58 @@ vm_object_populate_with_private(
                        m = vm_page_lookup(object, base_offset);
 
                        if (m != VM_PAGE_NULL) {
-                               if (m->fictitious) {
+                               if (m->vmp_fictitious) {
                                        if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
-
                                                vm_page_lockspin_queues();
-                                               m->private = TRUE;
+                                               m->vmp_private = TRUE;
                                                vm_page_unlock_queues();
 
-                                               m->fictitious = FALSE;
+                                               m->vmp_fictitious = FALSE;
                                                VM_PAGE_SET_PHYS_PAGE(m, base_page);
                                        }
                                } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
-
-                                       if ( !m->private) {
+                                       if (!m->vmp_private) {
                                                /*
                                                 * we'd leak a real page... that can't be right
                                                 */
                                                panic("vm_object_populate_with_private - %p not private", m);
                                        }
-                                       if (m->pmapped) {
-                                               /*
+                                       if (m->vmp_pmapped) {
+                                               /*
                                                 * pmap call to clear old mapping
                                                 */
-                                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+                                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
                                        }
                                        VM_PAGE_SET_PHYS_PAGE(m, base_page);
                                }
-                               if (m->encrypted) {
-                                       /*
-                                        * we should never see this on a ficticious or private page
-                                        */
-                                       panic("vm_object_populate_with_private - %p encrypted", m);
-                               }
-
                        } else {
-                               while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
-                                       vm_page_more_fictitious();      
+                               while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) {
+                                       vm_page_more_fictitious();
+                               }
 
                                /*
                                 * private normally requires lock_queues but since we
                                 * are initializing the page, it's not necessary here
                                 */
-                               m->private = TRUE;
-                               m->fictitious = FALSE;
+                               m->vmp_private = TRUE;
+                               m->vmp_fictitious = FALSE;
                                VM_PAGE_SET_PHYS_PAGE(m, base_page);
-                               m->unusual = TRUE;
-                               m->busy = FALSE;
+                               m->vmp_unusual = TRUE;
+                               m->vmp_busy = FALSE;
 
-                               vm_page_insert(m, object, base_offset);
+                               vm_page_insert(m, object, base_offset);
                        }
-                       base_page++;                                                                    /* Go to the next physical page */
+                       base_page++;                                                                    /* Go to the next physical page */
                        base_offset += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        } else {
                /* NOTE: we should check the original settings here */
                /* if we have a size > zero a pmap call should be made */
-               /* to disable the range */      
+               /* to disable the range */
 
                /* pmap_? */
-               
+
                /* shadows on contiguous memory are not allowed */
                /* we therefore can use the offset field */
                object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
@@ -6005,131 +5275,41 @@ vm_object_populate_with_private(
        return KERN_SUCCESS;
 }
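 
 /*
  * Illustrative sketch (hypothetical names, not part of this file): a
  * device pager wiring raw physical pages into a private object.
  * "dev_object" must have been created with the "private" attribute and
  * "dev_first_page" is a placeholder ppnum_t.
  */
 #if 0
        kern_return_t kr;
 
        kr = vm_object_populate_with_private(dev_object,
            0,                  /* offset: must be page-aligned */
            dev_first_page,     /* physical page number of the first page */
            16 * PAGE_SIZE);    /* cover 16 consecutive physical pages */
        assert(kr == KERN_SUCCESS);
 #endif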
 
-/*
- *     memory_object_free_from_cache:
- *
- *     Walk the vm_object cache list, removing and freeing vm_objects 
- *     which are backed by the pager identified by the caller, (pager_ops).  
- *     Remove up to "count" objects, if there are that may available
- *     in the cache.
- *
- *     Walk the list at most once, return the number of vm_objects
- *     actually freed.
- */
-
-__private_extern__ kern_return_t
-memory_object_free_from_cache(
-       __unused host_t         host,
-       __unused memory_object_pager_ops_t pager_ops,
-       int             *count)
-{
-#if VM_OBJECT_CACHE
-       int     object_released = 0;
-
-       vm_object_t object = VM_OBJECT_NULL;
-       vm_object_t shadow;
-
-/*
-       if(host == HOST_NULL)
-               return(KERN_INVALID_ARGUMENT);
-*/
-
- try_again:
-       vm_object_cache_lock();
-
-       queue_iterate(&vm_object_cached_list, object, 
-                                       vm_object_t, cached_list) {
-               if (object->pager &&
-                   (pager_ops == object->pager->mo_pager_ops)) {
-                       vm_object_lock(object);
-                       queue_remove(&vm_object_cached_list, object, 
-                                       vm_object_t, cached_list);
-                       vm_object_cached_count--;
-
-                       vm_object_cache_unlock();
-                       /*
-                       *       Since this object is in the cache, we know
-                       *       that it is initialized and has only a pager's
-                       *       (implicit) reference. Take a reference to avoid
-                       *       recursive deallocations.
-                       */
-
-                       assert(object->pager_initialized);
-                       assert(object->ref_count == 0);
-                       vm_object_lock_assert_exclusive(object);
-                       object->ref_count++;
-
-                       /*
-                       *       Terminate the object.
-                       *       If the object had a shadow, we let 
-                       *       vm_object_deallocate deallocate it. 
-                       *       "pageout" objects have a shadow, but
-                       *       maintain a "paging reference" rather 
-                       *       than a normal reference.
-                       *       (We are careful here to limit recursion.)
-                       */
-                       shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
-
-                       if ((vm_object_terminate(object) == KERN_SUCCESS)
-                                       && (shadow != VM_OBJECT_NULL)) {
-                               vm_object_deallocate(shadow);
-                       }
-               
-                       if(object_released++ == *count)
-                               return KERN_SUCCESS;
-                       goto try_again;
-               }
-       }
-       vm_object_cache_unlock();
-       *count  = object_released;
-#else
-       *count = 0;
-#endif
-       return KERN_SUCCESS;
-}
-
-
 
 kern_return_t
 memory_object_create_named(
-       memory_object_t pager,
-       memory_object_offset_t  size,
-       memory_object_control_t         *control)
+       memory_object_t pager,
+       memory_object_offset_t  size,
+       memory_object_control_t         *control)
 {
-       vm_object_t             object;
-       vm_object_hash_entry_t  entry;
-       lck_mtx_t               *lck;
+       vm_object_t             object;
 
        *control = MEMORY_OBJECT_CONTROL_NULL;
-       if (pager == MEMORY_OBJECT_NULL)
+       if (pager == MEMORY_OBJECT_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       lck = vm_object_hash_lock_spin(pager);
-       entry = vm_object_hash_lookup(pager, FALSE);
-
-       if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
-                       (entry->object != VM_OBJECT_NULL)) {
-               if (entry->object->named == TRUE)
-                       panic("memory_object_create_named: caller already holds the right");    }
-       vm_object_hash_unlock(lck);
-
-       if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
-               return(KERN_INVALID_OBJECT);
+       object = vm_object_memory_object_associate(pager,
+           VM_OBJECT_NULL,
+           size,
+           TRUE);
+       if (object == VM_OBJECT_NULL) {
+               return KERN_INVALID_OBJECT;
        }
-       
+
        /* wait for object (if any) to be ready */
        if (object != VM_OBJECT_NULL) {
                vm_object_lock(object);
                object->named = TRUE;
                while (!object->pager_ready) {
                        vm_object_sleep(object,
-                                       VM_OBJECT_EVENT_PAGER_READY,
-                                       THREAD_UNINT);
+                           VM_OBJECT_EVENT_PAGER_READY,
+                           THREAD_UNINT);
                }
                *control = object->pager_control;
                vm_object_unlock(object);
        }
-       return (KERN_SUCCESS);
+       return KERN_SUCCESS;
 }
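 
 /*
  * Illustrative sketch (hypothetical "pager" and "size", not part of
  * this file): the routine waits for VM_OBJECT_EVENT_PAGER_READY, so the
  * control it returns is immediately usable.
  */
 #if 0
        memory_object_control_t control;
 
        if (memory_object_create_named(pager, size, &control) != KERN_SUCCESS) {
                /* no VM object could be associated with this pager */
        }
 #endif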
 
 
@@ -6147,22 +5327,22 @@ memory_object_create_named(
  */
 kern_return_t
 memory_object_recover_named(
-       memory_object_control_t control,
-       boolean_t               wait_on_terminating)
+       memory_object_control_t control,
+       boolean_t               wait_on_terminating)
 {
-       vm_object_t             object;
+       vm_object_t             object;
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL) {
-               return (KERN_INVALID_ARGUMENT);
+               return KERN_INVALID_ARGUMENT;
        }
 restart:
        vm_object_lock(object);
 
        if (object->terminating && wait_on_terminating) {
-               vm_object_wait(object, 
-                       VM_OBJECT_EVENT_PAGING_IN_PROGRESS, 
-                       THREAD_UNINT);
+               vm_object_wait(object,
+                   VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+                   THREAD_UNINT);
                goto restart;
        }
 
@@ -6175,40 +5355,22 @@ restart:
                vm_object_unlock(object);
                return KERN_SUCCESS;
        }
-#if VM_OBJECT_CACHE
-       if ((object->ref_count == 0) && (!object->terminating)) {
-               if (!vm_object_cache_lock_try()) {
-                       vm_object_unlock(object);
-                       goto restart;
-               }
-               queue_remove(&vm_object_cached_list, object,
-                                    vm_object_t, cached_list);
-               vm_object_cached_count--;
-               XPR(XPR_VM_OBJECT_CACHE,
-                   "memory_object_recover_named: removing %X, head (%X, %X)\n",
-                   object, 
-                   vm_object_cached_list.next,
-                   vm_object_cached_list.prev, 0,0);
-               
-               vm_object_cache_unlock();
-       }
-#endif
        object->named = TRUE;
        vm_object_lock_assert_exclusive(object);
        object->ref_count++;
        vm_object_res_reference(object);
        while (!object->pager_ready) {
                vm_object_sleep(object,
-                               VM_OBJECT_EVENT_PAGER_READY,
-                               THREAD_UNINT);
+                   VM_OBJECT_EVENT_PAGER_READY,
+                   THREAD_UNINT);
        }
        vm_object_unlock(object);
-       return (KERN_SUCCESS);
+       return KERN_SUCCESS;
 }
 
 
 /*
- *     vm_object_release_name:  
+ *     vm_object_release_name:
  *
  *     Enforces name semantic on memory_object reference count decrement
  *     This routine should not be called unless the caller holds a name
@@ -6219,26 +5381,26 @@ restart:
  *     being the name.
  *     If the decision is made to proceed the name field flag is set to
  *     false and the reference count is decremented.  If the RESPECT_CACHE
- *     flag is set and the reference count has gone to zero, the 
+ *     flag is set and the reference count has gone to zero, the
  *     memory_object is checked to see if it is cacheable; otherwise, when
  *     the reference count is zero, it is simply terminated.
  */
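 /*
  *     Worked example of the rules above: with MEMORY_OBJECT_RESPECT_CACHE
  *     and ref_count == 1 (the name being the last reference), the named
  *     flag is cleared and vm_object_deallocate() decides whether the
  *     object is cached or terminated; with MEMORY_OBJECT_TERMINATE_IDLE
  *     and ref_count > 1, the call fails with KERN_FAILURE instead.
  */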
 
 __private_extern__ kern_return_t
 vm_object_release_name(
-       vm_object_t     object,
-       int             flags)
+       vm_object_t     object,
+       int             flags)
 {
-       vm_object_t     shadow;
-       boolean_t       original_object = TRUE;
+       vm_object_t     shadow;
+       boolean_t       original_object = TRUE;
 
        while (object != VM_OBJECT_NULL) {
-
                vm_object_lock(object);
 
                assert(object->alive);
-               if (original_object)
+               if (original_object) {
                        assert(object->named);
+               }
                assert(object->ref_count > 0);
 
                /*
@@ -6249,16 +5411,16 @@ vm_object_release_name(
                if (object->pager_created && !object->pager_initialized) {
                        assert(!object->can_persist);
                        vm_object_assert_wait(object,
-                                       VM_OBJECT_EVENT_INITIALIZED,
-                                       THREAD_UNINT);
+                           VM_OBJECT_EVENT_INITIALIZED,
+                           THREAD_UNINT);
                        vm_object_unlock(object);
                        thread_block(THREAD_CONTINUE_NULL);
                        continue;
                }
 
                if (((object->ref_count > 1)
-                       && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
-                       || (object->terminating)) {
+                   && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
+                   || (object->terminating)) {
                        vm_object_unlock(object);
                        return KERN_FAILURE;
                } else {
@@ -6267,11 +5429,12 @@ vm_object_release_name(
                                return KERN_SUCCESS;
                        }
                }
-               
+
                if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
-                                       (object->ref_count == 1)) {
-                       if (original_object)
+                   (object->ref_count == 1)) {
+                       if (original_object) {
                                object->named = FALSE;
+                       }
                        vm_object_unlock(object);
                        /* let vm_object_deallocate push this thing into */
                        /* the cache, if that is where it is bound */
@@ -6299,8 +5462,9 @@ vm_object_release_name(
                        vm_object_lock_assert_exclusive(object);
                        object->ref_count--;
                        assert(object->ref_count > 0);
-                       if(original_object)
+                       if (original_object) {
                                object->named = FALSE;
+                       }
                        vm_object_unlock(object);
                        return KERN_SUCCESS;
                }
@@ -6313,30 +5477,27 @@ vm_object_release_name(
 
 __private_extern__ kern_return_t
 vm_object_lock_request(
-       vm_object_t                     object,
-       vm_object_offset_t              offset,
-       vm_object_size_t                size,
-       memory_object_return_t          should_return,
-       int                             flags,
-       vm_prot_t                       prot)
+       vm_object_t                     object,
+       vm_object_offset_t              offset,
+       vm_object_size_t                size,
+       memory_object_return_t          should_return,
+       int                             flags,
+       vm_prot_t                       prot)
 {
-       __unused boolean_t      should_flush;
+       __unused boolean_t      should_flush;
 
        should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
 
-        XPR(XPR_MEMORY_OBJECT,
-           "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
-           object, offset, size, 
-           (((should_return&1)<<1)|should_flush), prot);
-
        /*
         *      Check for bogus arguments.
         */
-       if (object == VM_OBJECT_NULL)
-               return (KERN_INVALID_ARGUMENT);
+       if (object == VM_OBJECT_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
-               return (KERN_INVALID_ARGUMENT);
+       if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        size = round_page_64(size);
 
@@ -6348,12 +5509,12 @@ vm_object_lock_request(
        vm_object_paging_begin(object);
 
        (void)vm_object_update(object,
-               offset, size, NULL, NULL, should_return, flags, prot);
+           offset, size, NULL, NULL, should_return, flags, prot);
 
        vm_object_paging_end(object);
        vm_object_unlock(object);
 
-       return (KERN_SUCCESS);
+       return KERN_SUCCESS;
 }
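 
 /*
  * Illustrative sketch (hypothetical "object", "start" and "end", not
  * part of this file): the msync()-style use of this entry point,
  * returning dirty pages in a range to the pager and flushing them.
  */
 #if 0
        kern_return_t kr;
 
        kr = vm_object_lock_request(object,
            trunc_page_64(start),
            round_page_64(end - start),
            MEMORY_OBJECT_RETURN_DIRTY,         /* return modified pages */
            MEMORY_OBJECT_DATA_FLUSH,           /* and flush them */
            VM_PROT_NO_CHANGE);                 /* leave protections alone */
 #endif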
 
 /*
@@ -6368,17 +5529,18 @@ vm_object_lock_request(
  * On entry the object must be locked and it must be
  * purgeable with no delayed copies pending.
  */
-void
+uint64_t
 vm_object_purge(vm_object_t object, int flags)
 {
-       unsigned int    object_page_count = 0;
-       unsigned int    pgcount = 0;
-       boolean_t       skipped_object = FALSE;
+       unsigned int    object_page_count = 0, pgcount = 0;
+       uint64_t        total_purged_pgcount = 0;
+       boolean_t       skipped_object = FALSE;
 
-        vm_object_lock_assert_exclusive(object);
+       vm_object_lock_assert_exclusive(object);
 
-       if (object->purgable == VM_PURGABLE_DENY)
-               return;
+       if (object->purgable == VM_PURGABLE_DENY) {
+               return 0;
+       }
 
        assert(object->copy == VM_OBJECT_NULL);
        assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
@@ -6401,31 +5563,36 @@ vm_object_purge(vm_object_t object, int flags)
        if (object->purgable == VM_PURGABLE_VOLATILE) {
                unsigned int delta;
                assert(object->resident_page_count >=
-                      object->wired_page_count);
+                   object->wired_page_count);
                delta = (object->resident_page_count -
-                        object->wired_page_count);
+                   object->wired_page_count);
                if (delta != 0) {
                        assert(vm_page_purgeable_count >=
-                              delta);
+                           delta);
                        OSAddAtomic(-delta,
-                                   (SInt32 *)&vm_page_purgeable_count);
+                           (SInt32 *)&vm_page_purgeable_count);
                }
                if (object->wired_page_count != 0) {
                        assert(vm_page_purgeable_wired_count >=
-                              object->wired_page_count);
+                           object->wired_page_count);
                        OSAddAtomic(-object->wired_page_count,
-                                   (SInt32 *)&vm_page_purgeable_wired_count);
+                           (SInt32 *)&vm_page_purgeable_wired_count);
                }
                object->purgable = VM_PURGABLE_EMPTY;
        }
        assert(object->purgable == VM_PURGABLE_EMPTY);
-       
+
        object_page_count = object->resident_page_count;
 
        vm_object_reap_pages(object, REAP_PURGEABLE);
 
-       if (object->pager != NULL) {
+       if (object->resident_page_count >= object_page_count) {
+               total_purged_pgcount = 0;
+       } else {
+               total_purged_pgcount = object_page_count - object->resident_page_count;
+       }
 
+       if (object->pager != NULL) {
                assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
 
                if (object->activity_in_progress == 0 &&
@@ -6443,15 +5610,15 @@ vm_object_purge(vm_object_t object, int flags)
                        if (pgcount) {
                                pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
                                vm_compressor_pager_count(object->pager,
-                                                         -pgcount,
-                                                         FALSE, /* shared */
-                                                         object);
-                               vm_purgeable_compressed_update(object,
-                                                              -pgcount);
+                                   -pgcount,
+                                   FALSE,                       /* shared */
+                                   object);
+                               vm_object_owner_compressed_update(object,
+                                   -pgcount);
                        }
-                       if ( !(flags & C_DONT_BLOCK)) {
+                       if (!(flags & C_DONT_BLOCK)) {
                                assert(vm_compressor_pager_get_count(object->pager)
-                                      == 0);
+                                   == 0);
                        }
                } else {
                        /*
@@ -6461,7 +5628,7 @@ vm_object_purge(vm_object_t object, int flags)
                         * the VM object is not locked, so it could race
                         * with us.
                         *
-                        * We can't really synchronize this without possibly 
+                        * We can't really synchronize this without possibly
                         * causing a deadlock when the compressor needs to
                         * allocate or free memory while compressing or
                         * decompressing a page from a purgeable object
@@ -6477,15 +5644,18 @@ vm_object_purge(vm_object_t object, int flags)
 
        vm_object_lock_assert_exclusive(object);
 
+       total_purged_pgcount += pgcount;
+
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
-                             VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
-                             object_page_count,
-                             pgcount,
-                             skipped_object,
-                             0);
+           VM_KERNEL_UNSLIDE_OR_PERM(object),                   /* purged object */
+           object_page_count,
+           total_purged_pgcount,
+           skipped_object,
+           0);
 
+       return total_purged_pgcount;
 }
-                               
+
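 /*
  * Illustrative sketch (hypothetical caller, not part of this file):
  * vm_object_purge() now reports how many pages it freed, resident and
  * compressed combined, so the caller can account for what was
  * reclaimed.
  */
 #if 0
        uint64_t pages_purged;
 
        vm_object_lock(object);
        pages_purged = vm_object_purge(object, 0);      /* 0: may block on the compressor */
        vm_object_unlock(object);
 #endif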
 
 /*
  * vm_object_purgeable_control() allows the caller to control and investigate the
@@ -6564,12 +5734,12 @@ vm_object_purge(vm_object_t object, int flags)
  */
 kern_return_t
 vm_object_purgable_control(
-       vm_object_t     object,
-       vm_purgable_t   control,
-       int             *state)
+       vm_object_t     object,
+       vm_purgable_t   control,
+       int             *state)
 {
-       int             old_state;
-       int             new_state;
+       int             old_state;
+       int             new_state;
 
        if (object == VM_OBJECT_NULL) {
                /*
@@ -6584,11 +5754,12 @@ vm_object_purgable_control(
         * Get current state of the purgeable object.
         */
        old_state = object->purgable;
-       if (old_state == VM_PURGABLE_DENY)
+       if (old_state == VM_PURGABLE_DENY) {
                return KERN_INVALID_ARGUMENT;
-    
+       }
+
        /* purgeable can't have delayed copies - now or in the future */
-       assert(object->copy == VM_OBJECT_NULL); 
+       assert(object->copy == VM_OBJECT_NULL);
        assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
 
        /*
@@ -6599,6 +5770,16 @@ vm_object_purgable_control(
                return KERN_SUCCESS;
        }
 
+       if (control == VM_PURGABLE_SET_STATE &&
+           object->purgeable_only_by_kernel) {
+               return KERN_PROTECTION_FAILURE;
+       }
+
+       if (control != VM_PURGABLE_SET_STATE &&
+           control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
        if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
                object->volatile_empty = TRUE;
        }
@@ -6607,13 +5788,24 @@ vm_object_purgable_control(
        }
 
        new_state = *state & VM_PURGABLE_STATE_MASK;
-       if (new_state == VM_PURGABLE_VOLATILE &&
-           object->volatile_empty) {
-               new_state = VM_PURGABLE_EMPTY;
+       if (new_state == VM_PURGABLE_VOLATILE) {
+               if (old_state == VM_PURGABLE_EMPTY) {
+                       /* what's been emptied must stay empty */
+                       new_state = VM_PURGABLE_EMPTY;
+               }
+               if (object->volatile_empty) {
+                       /* debugging mode: go straight to empty */
+                       new_state = VM_PURGABLE_EMPTY;
+               }
        }
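        /*
         * Example of the normalization above: once an object has been
         * emptied (old_state == VM_PURGABLE_EMPTY), a later request for
         * VM_PURGABLE_VOLATILE is downgraded back to VM_PURGABLE_EMPTY,
         * since the purged contents cannot be brought back.
         */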
 
        switch (new_state) {
        case VM_PURGABLE_DENY:
+               /*
+                * Attempting to convert purgeable memory to non-purgeable:
+                * not allowed.
+                */
+               return KERN_INVALID_ARGUMENT;
        case VM_PURGABLE_NONVOLATILE:
                object->purgable = new_state;
 
@@ -6621,28 +5813,28 @@ vm_object_purgable_control(
                        unsigned int delta;
 
                        assert(object->resident_page_count >=
-                              object->wired_page_count);
+                           object->wired_page_count);
                        delta = (object->resident_page_count -
-                                object->wired_page_count);
+                           object->wired_page_count);
 
                        assert(vm_page_purgeable_count >= delta);
 
                        if (delta != 0) {
                                OSAddAtomic(-delta,
-                                           (SInt32 *)&vm_page_purgeable_count);
+                                   (SInt32 *)&vm_page_purgeable_count);
                        }
                        if (object->wired_page_count != 0) {
                                assert(vm_page_purgeable_wired_count >=
-                                      object->wired_page_count);
+                                   object->wired_page_count);
                                OSAddAtomic(-object->wired_page_count,
-                                           (SInt32 *)&vm_page_purgeable_wired_count);
+                                   (SInt32 *)&vm_page_purgeable_wired_count);
                        }
 
                        vm_page_lock_queues();
 
                        /* object should be on a queue */
                        assert(object->objq.next != NULL &&
-                              object->objq.prev != NULL);
+                           object->objq.prev != NULL);
                        purgeable_q_t queue;
 
                        /*
@@ -6655,7 +5847,7 @@ vm_object_purgable_control(
                        if (object->purgeable_when_ripe) {
                                vm_purgeable_token_delete_last(queue);
                        }
-                       assert(queue->debug_count_objects>=0);
+                       assert(queue->debug_count_objects >= 0);
 
                        vm_page_unlock_queues();
                }
@@ -6665,48 +5857,45 @@ vm_object_purgable_control(
                         * Transfer the object's pages from the volatile to
                         * non-volatile ledgers.
                         */
-                       vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE,
-                                               FALSE);
+                       vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
                }
 
                break;
 
        case VM_PURGABLE_VOLATILE:
                if (object->volatile_fault) {
-                       vm_page_t       p;
-                       int             refmod;
+                       vm_page_t       p;
+                       int             refmod;
 
-                       vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
-                               if (p->busy ||
+                       vm_page_queue_iterate(&object->memq, p, vmp_listq) {
+                               if (p->vmp_busy ||
                                    VM_PAGE_WIRED(p) ||
-                                   p->fictitious) {
+                                   p->vmp_fictitious) {
                                        continue;
                                }
                                refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                                if ((refmod & VM_MEM_MODIFIED) &&
-                                   !p->dirty) {
+                                   !p->vmp_dirty) {
                                        SET_PAGE_DIRTY(p, FALSE);
                                }
                        }
                }
-                                              
-               if (old_state == VM_PURGABLE_EMPTY &&
-                   object->resident_page_count == 0 &&
-                   object->pager == NULL)
-                       break;
+
+               assert(old_state != VM_PURGABLE_EMPTY);
 
                purgeable_q_t queue;
-        
+
                /* find the correct queue */
-               if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
-                       queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
-               else {
-                       if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
-                               queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
-                       else
-                               queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
-               }
-        
+               if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
+                       queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+               } else {
+                       if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
+                               queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+                       } else {
+                               queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+                       }
+               }
+
                if (old_state == VM_PURGABLE_NONVOLATILE ||
                    old_state == VM_PURGABLE_EMPTY) {
                        unsigned int delta;
@@ -6717,7 +5906,7 @@ vm_object_purgable_control(
                        } else {
                                object->purgeable_when_ripe = TRUE;
                        }
-                               
+
                        if (object->purgeable_when_ripe) {
                                kern_return_t result;
 
@@ -6733,17 +5922,17 @@ vm_object_purgable_control(
                        }
 
                        assert(object->resident_page_count >=
-                              object->wired_page_count);
+                           object->wired_page_count);
                        delta = (object->resident_page_count -
-                                object->wired_page_count);
+                           object->wired_page_count);
 
                        if (delta != 0) {
                                OSAddAtomic(delta,
-                                           &vm_page_purgeable_count);
+                                   &vm_page_purgeable_count);
                        }
                        if (object->wired_page_count != 0) {
                                OSAddAtomic(object->wired_page_count,
-                                           &vm_page_purgeable_wired_count);
+                                   &vm_page_purgeable_wired_count);
                        }
 
                        object->purgable = new_state;
@@ -6751,12 +5940,11 @@ vm_object_purgable_control(
                        /* object should be on "non-volatile" queue */
                        assert(object->objq.next != NULL);
                        assert(object->objq.prev != NULL);
-               }
-               else if (old_state == VM_PURGABLE_VOLATILE) {
-                       purgeable_q_t   old_queue;
-                       boolean_t       purgeable_when_ripe;
+               } else if (old_state == VM_PURGABLE_VOLATILE) {
+                       purgeable_q_t   old_queue;
+                       boolean_t       purgeable_when_ripe;
 
-                       /*
+                       /*
                         * if reassigning priorities / purgeable groups, we don't change the
                         * token queue. So moving priorities will not make pages stay around longer.
                         * Reasoning is that the algorithm gives most priority to the most important
@@ -6764,80 +5952,73 @@ vm_object_purgable_control(
                         * This biases the system already for purgeable queues that move a lot.
                         * It doesn't seem more biasing is necessary in this case, where no new object is added.
                         */
-                       assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
-            
+                       assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
+
                        old_queue = vm_purgeable_object_remove(object);
                        assert(old_queue);
-            
+
                        if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
                            VM_PURGABLE_NO_AGING) {
                                purgeable_when_ripe = FALSE;
                        } else {
                                purgeable_when_ripe = TRUE;
                        }
-                               
+
                        if (old_queue != queue ||
                            (purgeable_when_ripe !=
-                            object->purgeable_when_ripe)) {
+                           object->purgeable_when_ripe)) {
                                kern_return_t result;
 
-                               /* Changing queue. Have to move token. */
-                               vm_page_lock_queues();
+                               /* Changing queue. Have to move token. */
+                               vm_page_lock_queues();
                                if (object->purgeable_when_ripe) {
                                        vm_purgeable_token_delete_last(old_queue);
                                }
                                object->purgeable_when_ripe = purgeable_when_ripe;
                                if (object->purgeable_when_ripe) {
                                        result = vm_purgeable_token_add(queue);
-                                       assert(result==KERN_SUCCESS);   /* this should never fail since we just freed a token */
+                                       assert(result == KERN_SUCCESS);   /* this should never fail since we just freed a token */
                                }
                                vm_page_unlock_queues();
-
                        }
-               };
-               vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
+               }
+               vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT);
                if (old_state == VM_PURGABLE_NONVOLATILE) {
-                       vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
-                                               FALSE);
+                       vm_purgeable_accounting(object,
+                           VM_PURGABLE_NONVOLATILE);
                }
 
-               assert(queue->debug_count_objects>=0);
-        
+               assert(queue->debug_count_objects >= 0);
+
                break;
 
 
        case VM_PURGABLE_EMPTY:
                if (object->volatile_fault) {
-                       vm_page_t       p;
-                       int             refmod;
+                       vm_page_t       p;
+                       int             refmod;
 
-                       vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
-                               if (p->busy ||
+                       vm_page_queue_iterate(&object->memq, p, vmp_listq) {
+                               if (p->vmp_busy ||
                                    VM_PAGE_WIRED(p) ||
-                                   p->fictitious) {
+                                   p->vmp_fictitious) {
                                        continue;
                                }
                                refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                                if ((refmod & VM_MEM_MODIFIED) &&
-                                   !p->dirty) {
+                                   !p->vmp_dirty) {
                                        SET_PAGE_DIRTY(p, FALSE);
                                }
                        }
                }
 
-               if (old_state == new_state) {
-                       /* nothing changes */
-                       break;
-               }
-
-               assert(old_state == VM_PURGABLE_NONVOLATILE ||
-                      old_state == VM_PURGABLE_VOLATILE);
                if (old_state == VM_PURGABLE_VOLATILE) {
                        purgeable_q_t old_queue;
 
                        /* object should be on a queue */
                        assert(object->objq.next != NULL &&
-                              object->objq.prev != NULL);
+                           object->objq.prev != NULL);
 
                        old_queue = vm_purgeable_object_remove(object);
                        assert(old_queue);
@@ -6854,8 +6035,8 @@ vm_object_purgable_control(
                         * "non-volatile" and now need to be accounted as
                         * "volatile".
                         */
-                       vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
-                                               FALSE);
+                       vm_purgeable_accounting(object,
+                           VM_PURGABLE_NONVOLATILE);
                        /*
                         * Set to VM_PURGABLE_EMPTY because the pages are no
                         * longer accounted in the "non-volatile" ledger
@@ -6880,33 +6061,32 @@ vm_object_purgable_control(
 
 kern_return_t
 vm_object_get_page_counts(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       vm_object_size_t        size,
-       unsigned int            *resident_page_count,
-       unsigned int            *dirty_page_count)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size,
+       unsigned int            *resident_page_count,
+       unsigned int            *dirty_page_count)
 {
+       kern_return_t           kr = KERN_SUCCESS;
+       boolean_t               count_dirty_pages = FALSE;
+       vm_page_t               p = VM_PAGE_NULL;
+       unsigned int            local_resident_count = 0;
+       unsigned int            local_dirty_count = 0;
+       vm_object_offset_t      cur_offset = 0;
+       vm_object_offset_t      end_offset = 0;
 
-       kern_return_t           kr = KERN_SUCCESS;
-       boolean_t               count_dirty_pages = FALSE;
-       vm_page_t               p = VM_PAGE_NULL;
-       unsigned int            local_resident_count = 0;
-       unsigned int            local_dirty_count = 0;
-       vm_object_offset_t      cur_offset = 0;
-       vm_object_offset_t      end_offset = 0;
-
-       if (object == VM_OBJECT_NULL)
+       if (object == VM_OBJECT_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
 
        cur_offset = offset;
-       
+
        end_offset = offset + size;
 
        vm_object_lock_assert_exclusive(object);
 
        if (dirty_page_count != NULL) {
-
                count_dirty_pages = TRUE;
        }
 
@@ -6917,49 +6097,37 @@ vm_object_get_page_counts(
                 * - the entire object is exactly covered by the request.
                 */
                if (offset == 0 && (object->vo_size == size)) {
-
                        *resident_page_count = object->resident_page_count;
                        goto out;
                }
        }
 
        if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
-
-               vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
-               
-                       if (p->offset >= cur_offset && p->offset < end_offset) {
-
+               vm_page_queue_iterate(&object->memq, p, vmp_listq) {
+                       if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
                                local_resident_count++;
 
                                if (count_dirty_pages) {
-                                       
-                                       if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
-                                               
+                                       if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
                                                local_dirty_count++;
                                        }
                                }
                        }
                }
        } else {
-
                for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
-       
                        p = vm_page_lookup(object, cur_offset);
-               
-                       if (p != VM_PAGE_NULL) {
 
+                       if (p != VM_PAGE_NULL) {
                                local_resident_count++;
 
                                if (count_dirty_pages) {
-                                       
-                                       if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
-                               
+                                       if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
                                                local_dirty_count++;
                                        }
                                }
                        }
                }
-
        }
 
        if (resident_page_count != NULL) {
@@ -6975,7 +6143,7 @@ out:
 }
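 
 /*
  * Note on the strategy above: when the object holds no more resident
  * pages than the request spans (size >> PAGE_SHIFT), a single walk of
  * the object's memq is cheaper; otherwise one vm_page_lookup() per page
  * offset in [offset, offset + size) touches fewer pages.
  */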
 
 
-#if    TASK_SWAPPER
+#if     TASK_SWAPPER
 /*
  * vm_object_res_deallocate
  *
@@ -6991,7 +6159,7 @@ out:
 
 __private_extern__ void
 vm_object_res_deallocate(
-       vm_object_t     object)
+       vm_object_t     object)
 {
        vm_object_t orig_object = object;
        /*
@@ -7000,22 +6168,25 @@ vm_object_res_deallocate(
         * unlocked.
         */
        assert(object->res_count > 0);
-       while  (--object->res_count == 0) {
+       while (--object->res_count == 0) {
                assert(object->ref_count >= object->res_count);
                vm_object_deactivate_all_pages(object);
                /* iterate on shadow, if present */
                if (object->shadow != VM_OBJECT_NULL) {
                        vm_object_t tmp_object = object->shadow;
                        vm_object_lock(tmp_object);
-                       if (object != orig_object)
+                       if (object != orig_object) {
                                vm_object_unlock(object);
+                       }
                        object = tmp_object;
                        assert(object->res_count > 0);
-               } else
+               } else {
                        break;
+               }
        }
-       if (object != orig_object)
+       if (object != orig_object) {
                vm_object_unlock(object);
+       }
 }
 
 /*
@@ -7034,28 +6205,30 @@ vm_object_res_deallocate(
 
 __private_extern__ void
 vm_object_res_reference(
-       vm_object_t     object)
+       vm_object_t     object)
 {
        vm_object_t orig_object = object;
-       /* 
+       /*
         * Object is locked, so this can be called directly
         * from vm_object_reference.  This lock is never released.
         */
-       while  ((++object->res_count == 1)  && 
-               (object->shadow != VM_OBJECT_NULL)) {
+       while ((++object->res_count == 1) &&
+           (object->shadow != VM_OBJECT_NULL)) {
                vm_object_t tmp_object = object->shadow;
 
                assert(object->ref_count >= object->res_count);
                vm_object_lock(tmp_object);
-               if (object != orig_object)
+               if (object != orig_object) {
                        vm_object_unlock(object);
+               }
                object = tmp_object;
        }
-       if (object != orig_object)
+       if (object != orig_object) {
                vm_object_unlock(object);
+       }
        assert(orig_object->ref_count >= orig_object->res_count);
 }
-#endif /* TASK_SWAPPER */
+#endif  /* TASK_SWAPPER */
 
 /*
  *     vm_object_reference:
@@ -7067,10 +6240,11 @@ vm_object_res_reference(
 #endif
 __private_extern__ void
 vm_object_reference(
-       vm_object_t     object)
+       vm_object_t     object)
 {
-       if (object == VM_OBJECT_NULL)
+       if (object == VM_OBJECT_NULL) {
                return;
+       }
 
        vm_object_lock(object);
        assert(object->ref_count > 0);
@@ -7078,31 +6252,6 @@ vm_object_reference(
        vm_object_unlock(object);
 }
 
-#ifdef MACH_BSD
-/*
- * Scale the vm_object_cache
- * This is required to make sure that the vm_object_cache is big
- * enough to effectively cache the mapped file.
- * This is really important with UBC as all the regular file vnodes
- * have memory object associated with them. Havving this cache too
- * small results in rapid reclaim of vnodes and hurts performance a LOT!
- *
- * This is also needed as number of vnodes can be dynamically scaled.
- */
-kern_return_t
-adjust_vm_object_cache(
-       __unused vm_size_t oval,
-       __unused vm_size_t nval)
-{
-#if VM_OBJECT_CACHE
-       vm_object_cached_max = nval;
-       vm_object_cache_trim(FALSE);
-#endif
-       return (KERN_SUCCESS);
-}
-#endif /* MACH_BSD */
-
-
 /*
  * vm_object_transpose
  *
@@ -7116,17 +6265,15 @@ adjust_vm_object_cache(
 unsigned int vm_object_transpose_count = 0;
 kern_return_t
 vm_object_transpose(
-       vm_object_t             object1,
-       vm_object_t             object2,
-       vm_object_size_t        transpose_size)
+       vm_object_t             object1,
+       vm_object_t             object2,
+       vm_object_size_t        transpose_size)
 {
-       vm_object_t             tmp_object;
-       kern_return_t           retval;
-       boolean_t               object1_locked, object2_locked;
-       vm_page_t               page;
-       vm_object_offset_t      page_offset;
-       lck_mtx_t               *hash_lck;
-       vm_object_hash_entry_t  hash_entry;
+       vm_object_t             tmp_object;
+       kern_return_t           retval;
+       boolean_t               object1_locked, object2_locked;
+       vm_page_t               page;
+       vm_object_offset_t      page_offset;
 
        tmp_object = VM_OBJECT_NULL;
        object1_locked = FALSE; object2_locked = FALSE;
@@ -7147,7 +6294,7 @@ vm_object_transpose(
         * make sure we always lock them in the same order to
         * avoid deadlocks.
         */
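        /*
         * Example: if thread A locked object1 then object2 while thread B
         * locked object2 then object1, each could hold one lock while
         * waiting forever for the other.  Ordering the pair by address
         * makes every thread take the two locks in the same order.
         */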
-       if (object1 >  object2) {
+       if (object1 > object2) {
                tmp_object = object1;
                object1 = object2;
                object2 = tmp_object;
@@ -7177,14 +6324,14 @@ vm_object_transpose(
                goto done;
        }
        /*
-        * We're about to mess with the object's backing store and 
+        * We're about to mess with the object's backing store and
         * taking a "paging_in_progress" reference wouldn't be enough
         * to prevent any paging activity on this object, so the caller should
         * have "quiesced" the objects beforehand, via a UPL operation with
         * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
         * and UPL_BLOCK_ACCESS (to mark the pages "busy").
-        * 
-        * Wait for any paging operation to complete (but only paging, not 
+        *
+        * Wait for any paging operation to complete (but only paging, not
         * other kinds of activity not linked to the pager).  After we're
         * satisfied that there's no more paging in progress, we keep the
         * object locked, to guarantee that no one tries to access its pager.
@@ -7196,7 +6343,7 @@ vm_object_transpose(
         */
        vm_object_lock(object2);
        object2_locked = TRUE;
-       if (! object2->alive || object2->terminating ||
+       if (!object2->alive || object2->terminating ||
            object2->copy || object2->shadow || object2->shadowed ||
            object2->purgable != VM_PURGABLE_DENY) {
                retval = KERN_INVALID_VALUE;
@@ -7212,7 +6359,7 @@ vm_object_transpose(
                 * exchange their backing stores or one would overflow.
                 * If their size doesn't match the caller's
                 * "transpose_size", we can't do it either because the
-                * transpose operation will affect the entire span of 
+                * transpose operation will affect the entire span of
                 * the objects.
                 */
                retval = KERN_INVALID_VALUE;
@@ -7232,7 +6379,7 @@ vm_object_transpose(
                 */
                while (!vm_page_queue_empty(&object2->memq)) {
                        page = (vm_page_t) vm_page_queue_first(&object2->memq);
-                       vm_page_rename(page, object1, page->offset, FALSE);
+                       vm_page_rename(page, object1, page->vmp_offset);
                }
                assert(vm_page_queue_empty(&object2->memq));
        } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
@@ -7243,40 +6390,39 @@ vm_object_transpose(
                 */
                while (!vm_page_queue_empty(&object1->memq)) {
                        page = (vm_page_t) vm_page_queue_first(&object1->memq);
-                       vm_page_rename(page, object2, page->offset, FALSE);
+                       vm_page_rename(page, object2, page->vmp_offset);
                }
                assert(vm_page_queue_empty(&object1->memq));
        } else {
                /* transfer object1's pages to tmp_object */
                while (!vm_page_queue_empty(&object1->memq)) {
                        page = (vm_page_t) vm_page_queue_first(&object1->memq);
-                       page_offset = page->offset;
+                       page_offset = page->vmp_offset;
                        vm_page_remove(page, TRUE);
-                       page->offset = page_offset;
-                       vm_page_queue_enter(&tmp_object->memq, page, vm_page_t, listq);
+                       page->vmp_offset = page_offset;
+                       vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
                }
                assert(vm_page_queue_empty(&object1->memq));
                /* transfer object2's pages to object1 */
                while (!vm_page_queue_empty(&object2->memq)) {
                        page = (vm_page_t) vm_page_queue_first(&object2->memq);
-                       vm_page_rename(page, object1, page->offset, FALSE);
+                       vm_page_rename(page, object1, page->vmp_offset);
                }
                assert(vm_page_queue_empty(&object2->memq));
                /* transfer tmp_object's pages to object2 */
                while (!vm_page_queue_empty(&tmp_object->memq)) {
                        page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
-                       vm_page_queue_remove(&tmp_object->memq, page,
-                                            vm_page_t, listq);
-                       vm_page_insert(page, object2, page->offset);
+                       vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
+                       vm_page_insert(page, object2, page->vmp_offset);
                }
                assert(vm_page_queue_empty(&tmp_object->memq));
        }
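        /*
         * The three-way move above is the classic swap-via-temporary
         * (tmp = a; a = b; b = tmp) applied to page queues: object1's
         * pages park in tmp_object, object2's pages move to object1,
         * and the parked pages then land in object2.
         */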
 
-#define __TRANSPOSE_FIELD(field)                               \
-MACRO_BEGIN                                                    \
-       tmp_object->field = object1->field;                     \
-       object1->field = object2->field;                        \
-       object2->field = tmp_object->field;                     \
+#define __TRANSPOSE_FIELD(field)                                \
+MACRO_BEGIN                                                     \
+       tmp_object->field = object1->field;                     \
+       object1->field = object2->field;                        \
+       object2->field = tmp_object->field;                     \
 MACRO_END
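 
 /*
  * For reference, __TRANSPOSE_FIELD(copy_strategy) expands to a plain
  * three-assignment swap through tmp_object:
  *
  *     tmp_object->copy_strategy = object1->copy_strategy;
  *     object1->copy_strategy = object2->copy_strategy;
  *     object2->copy_strategy = tmp_object->copy_strategy;
  */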
 
        /* "Lock" refers to the object not its contents */
@@ -7284,11 +6430,16 @@ MACRO_END
        assert(object1->vo_size == object2->vo_size);
        /* "memq_hint" was updated above when transposing pages */
        /* "ref_count" refers to the object not its contents */
+       assert(object1->ref_count >= 1);
+       assert(object2->ref_count >= 1);
 #if TASK_SWAPPER
        /* "res_count" refers to the object not its contents */
 #endif
        /* "resident_page_count" was updated above when transposing pages */
        /* "wired_page_count" was updated above when transposing pages */
+#if !VM_TAG_ACTIVE_UPDATE
+       /* "wired_objq" was dealt with along with "wired_page_count" */
+#endif /* ! VM_TAG_ACTIVE_UPDATE */
        /* "reusable_page_count" was updated above when transposing pages */
        /* there should be no "copy" */
        assert(!object1->copy);
@@ -7303,11 +6454,11 @@ MACRO_END
        /* update the memory_objects' pointers back to the VM objects */
        if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
                memory_object_control_collapse(object1->pager_control,
-                                              object1);
+                   object1);
        }
        if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
                memory_object_control_collapse(object2->pager_control,
-                                              object2);
+                   object2);
        }
        __TRANSPOSE_FIELD(copy_strategy);
        /* "paging_in_progress" refers to the object not its contents */
@@ -7322,7 +6473,6 @@ MACRO_END
        __TRANSPOSE_FIELD(pager_trusted);
        __TRANSPOSE_FIELD(can_persist);
        __TRANSPOSE_FIELD(internal);
-       __TRANSPOSE_FIELD(temporary);
        __TRANSPOSE_FIELD(private);
        __TRANSPOSE_FIELD(pageout);
        /* "alive" should be set */
@@ -7333,11 +6483,22 @@ MACRO_END
        assert(object2->purgable == VM_PURGABLE_DENY);
        /* "shadowed" refers to the the object not its contents */
        __TRANSPOSE_FIELD(purgeable_when_ripe);
-       __TRANSPOSE_FIELD(advisory_pageout);
        __TRANSPOSE_FIELD(true_share);
        /* "terminating" should not be set */
        assert(!object1->terminating);
        assert(!object2->terminating);
+       /* transfer "named" reference if needed */
+       if (object1->named && !object2->named) {
+               assert(object1->ref_count >= 2);
+               assert(object2->ref_count >= 1);
+               object1->ref_count--;
+               object2->ref_count++;
+       } else if (!object1->named && object2->named) {
+               assert(object1->ref_count >= 1);
+               assert(object2->ref_count >= 2);
+               object1->ref_count++;
+               object2->ref_count--;
+       }
        __TRANSPOSE_FIELD(named);
        /* "shadow_severed" refers to the object not its contents */
        __TRANSPOSE_FIELD(phys_contiguous);
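
The "named" transfer above preserves a structural invariant: a named object
always carries one reference on behalf of its name, so before the named bit is
swapped, one reference has to move in the same direction. Illustrative
before/after values (not actual kernel state):

	/*
	 * before:  object1 { named=1, ref_count=2 }   object2 { named=0, ref_count=1 }
	 * after:   object1 { named=0, ref_count=1 }   object2 { named=1, ref_count=2 }
	 *
	 * Without the ref_count adjustment, __TRANSPOSE_FIELD(named) alone
	 * would leave the newly named object holding one reference too few.
	 */
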
@@ -7348,36 +6509,15 @@ MACRO_END
        /* "cached_list.prev" should be NULL */
        assert(object1->cached_list.prev == NULL);
        assert(object2->cached_list.prev == NULL);
-       /* "msr_q" is linked to the object not its contents */
-       assert(queue_empty(&object1->msr_q));
-       assert(queue_empty(&object2->msr_q));
        __TRANSPOSE_FIELD(last_alloc);
        __TRANSPOSE_FIELD(sequential);
        __TRANSPOSE_FIELD(pages_created);
        __TRANSPOSE_FIELD(pages_used);
        __TRANSPOSE_FIELD(scan_collisions);
        __TRANSPOSE_FIELD(cow_hint);
-#if MACH_ASSERT
-       __TRANSPOSE_FIELD(paging_object);
-#endif
        __TRANSPOSE_FIELD(wimg_bits);
        __TRANSPOSE_FIELD(set_cache_attr);
        __TRANSPOSE_FIELD(code_signed);
-       if (object1->hashed) {
-               hash_lck = vm_object_hash_lock_spin(object2->pager);
-               hash_entry = vm_object_hash_lookup(object2->pager, FALSE);
-               assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
-               hash_entry->object = object2;
-               vm_object_hash_unlock(hash_lck);
-       }
-       if (object2->hashed) {
-               hash_lck = vm_object_hash_lock_spin(object1->pager);
-               hash_entry = vm_object_hash_lookup(object1->pager, FALSE);
-               assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
-               hash_entry->object = object1;
-               vm_object_hash_unlock(hash_lck);
-       }
-       __TRANSPOSE_FIELD(hashed);
        object1->transposed = TRUE;
        object2->transposed = TRUE;
        __TRANSPOSE_FIELD(mapping_in_progress);
@@ -7386,8 +6526,26 @@ MACRO_END
        __TRANSPOSE_FIELD(all_reusable);
        assert(object1->blocked_access);
        assert(object2->blocked_access);
-       assert(object1->__object2_unused_bits == 0);
-       assert(object2->__object2_unused_bits == 0);
+       assert(!object1->object_is_shared_cache);
+       assert(!object2->object_is_shared_cache);
+       /* ignore purgeable_queue_type and purgeable_queue_group */
+       assert(!object1->io_tracking);
+       assert(!object2->io_tracking);
+#if VM_OBJECT_ACCESS_TRACKING
+       assert(!object1->access_tracking);
+       assert(!object2->access_tracking);
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+       __TRANSPOSE_FIELD(no_tag_update);
+#if CONFIG_SECLUDED_MEMORY
+       assert(!object1->eligible_for_secluded);
+       assert(!object2->eligible_for_secluded);
+       assert(!object1->can_grab_secluded);
+       assert(!object2->can_grab_secluded);
+#else /* CONFIG_SECLUDED_MEMORY */
+       assert(object1->__object3_unused_bits == 0);
+       assert(object2->__object3_unused_bits == 0);
+#endif /* CONFIG_SECLUDED_MEMORY */
 #if UPL_DEBUG
        /* "uplq" refers to the object not its contents (see upl_transpose()) */
 #endif
@@ -7444,7 +6602,6 @@ done:
  *
  */
 extern int speculative_reads_disabled;
-extern int ignore_is_ssd;
 
 /*
  * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
@@ -7453,32 +6610,36 @@ extern int ignore_is_ssd;
  * that could give us non-page-size aligned values if we start out with values that
  * are odd multiples of PAGE_SIZE.
  */
-       unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
+#if CONFIG_EMBEDDED
+unsigned int preheat_max_bytes = (1024 * 512);
+#else /* CONFIG_EMBEDDED */
+unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
+#endif /* CONFIG_EMBEDDED */
 unsigned int preheat_min_bytes = (1024 * 32);
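
The alignment concern in the comment above is easy to reproduce with concrete
numbers: dividing a page-aligned bound by 8 leaves it misaligned unless the
page count is itself a multiple of 8, which is why the isSSD path re-truncates.
A small userland-style sketch, with PAGE_SIZE assumed to be 4096 and the
kernel's trunc_page()/round_page() open-coded:

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/* open-coded stand-ins for the kernel's trunc_page()/round_page() */
	static unsigned int trunc_page_sz(unsigned int x) { return x & ~(PAGE_SIZE - 1u); }
	static unsigned int round_page_sz(unsigned int x) { return trunc_page_sz(x + PAGE_SIZE - 1u); }

	int
	main(void)
	{
		unsigned int max_ph_size = 20u * PAGE_SIZE;   /* 81920, page aligned */

		max_ph_size /= 8;                             /* SSD scaling -> 10240, no longer aligned */
		printf("scaled: %u\n", max_ph_size);
		printf("trunc:  %u\n", trunc_page_sz(max_ph_size));   /* 8192  */
		printf("round:  %u\n", round_page_sz(max_ph_size));   /* 12288 */
		return 0;
	}
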
 
 
 __private_extern__ void
 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
-                      vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
+    vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
 {
-       vm_size_t               pre_heat_size;
-       vm_size_t               tail_size;
-       vm_size_t               head_size;
-       vm_size_t               max_length;
-       vm_size_t               cluster_size;
-       vm_object_offset_t      object_size;
-       vm_object_offset_t      orig_start;
-       vm_object_offset_t      target_start;
-       vm_object_offset_t      offset;
-       vm_behavior_t           behavior;
-       boolean_t               look_behind = TRUE;
-       boolean_t               look_ahead  = TRUE;
-       boolean_t               isSSD = FALSE;
-       uint32_t                throttle_limit;
-       int                     sequential_run;
-       int                     sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
-       vm_size_t               max_ph_size;
-       vm_size_t               min_ph_size;
+       vm_size_t               pre_heat_size;
+       vm_size_t               tail_size;
+       vm_size_t               head_size;
+       vm_size_t               max_length;
+       vm_size_t               cluster_size;
+       vm_object_offset_t      object_size;
+       vm_object_offset_t      orig_start;
+       vm_object_offset_t      target_start;
+       vm_object_offset_t      offset;
+       vm_behavior_t           behavior;
+       boolean_t               look_behind = TRUE;
+       boolean_t               look_ahead  = TRUE;
+       boolean_t               isSSD = FALSE;
+       uint32_t                throttle_limit;
+       int                     sequential_run;
+       int                     sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+       vm_size_t               max_ph_size;
+       vm_size_t               min_ph_size;
 
        assert( !(*length & PAGE_MASK));
        assert( !(*start & PAGE_MASK_64));
@@ -7496,10 +6657,10 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
        *io_streaming = 0;
 
        if (speculative_reads_disabled || fault_info == NULL) {
-               /*
+               /*
                 * no cluster... just fault the page in
                 */
-               return;
+               return;
        }
        orig_start = *start;
        target_start = orig_start;
@@ -7508,15 +6669,15 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
 
        vm_object_lock(object);
 
-       if (object->pager == MEMORY_OBJECT_NULL)
-               goto out;       /* pager is gone for this object, nothing more to do */
-
-       if (!ignore_is_ssd)
-               vnode_pager_get_isSSD(object->pager, &isSSD);
+       if (object->pager == MEMORY_OBJECT_NULL) {
+               goto out;       /* pager is gone for this object, nothing more to do */
+       }
+       vnode_pager_get_isSSD(object->pager, &isSSD);
 
        min_ph_size = round_page(preheat_min_bytes);
        max_ph_size = round_page(preheat_max_bytes);
 
+#if !CONFIG_EMBEDDED
        if (isSSD) {
                min_ph_size /= 2;
                max_ph_size /= 8;
@@ -7529,143 +6690,153 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                        max_ph_size = trunc_page(max_ph_size);
                }
        }
+#endif /* !CONFIG_EMBEDDED */
 
-       if (min_ph_size < PAGE_SIZE)
+       if (min_ph_size < PAGE_SIZE) {
                min_ph_size = PAGE_SIZE;
+       }
 
-       if (max_ph_size < PAGE_SIZE)
+       if (max_ph_size < PAGE_SIZE) {
                max_ph_size = PAGE_SIZE;
-       else if (max_ph_size > MAX_UPL_TRANSFER_BYTES)
+       } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
                max_ph_size = MAX_UPL_TRANSFER_BYTES;
+       }
 
-       if (max_length > max_ph_size) 
-               max_length = max_ph_size;
+       if (max_length > max_ph_size) {
+               max_length = max_ph_size;
+       }
 
-       if (max_length <= PAGE_SIZE)
+       if (max_length <= PAGE_SIZE) {
                goto out;
+       }
 
-       if (object->internal)
-               object_size = object->vo_size;
-       else
-               vnode_pager_get_object_size(object->pager, &object_size);
+       if (object->internal) {
+               object_size = object->vo_size;
+       } else {
+               vnode_pager_get_object_size(object->pager, &object_size);
+       }
 
        object_size = round_page_64(object_size);
 
        if (orig_start >= object_size) {
-               /*
+               /*
                 * fault occurred beyond the EOF...
                 * we need to punt w/o changing the
                 * starting offset
                 */
-               goto out;
+               goto out;
        }
        if (object->pages_used > object->pages_created) {
-               /*
+               /*
                 * must have wrapped our 32 bit counters
                 * so reset
                 */
-               object->pages_used = object->pages_created = 0;
+               object->pages_used = object->pages_created = 0;
        }
        if ((sequential_run = object->sequential)) {
-                 if (sequential_run < 0) {
-                         sequential_behavior = VM_BEHAVIOR_RSEQNTL;
-                         sequential_run = 0 - sequential_run;
-                 } else {
-                         sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
-                 }
-
+               if (sequential_run < 0) {
+                       sequential_behavior = VM_BEHAVIOR_RSEQNTL;
+                       sequential_run = 0 - sequential_run;
+               } else {
+                       sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+               }
        }
        switch (behavior) {
-
        default:
-               behavior = VM_BEHAVIOR_DEFAULT;
+               behavior = VM_BEHAVIOR_DEFAULT;
 
        case VM_BEHAVIOR_DEFAULT:
-               if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
-                       goto out;
+               if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
+                       goto out;
+               }
 
                if (sequential_run >= (3 * PAGE_SIZE)) {
-                       pre_heat_size = sequential_run + PAGE_SIZE;
+                       pre_heat_size = sequential_run + PAGE_SIZE;
 
-                       if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL)
-                               look_behind = FALSE;
-                       else
-                               look_ahead = FALSE;
+                       if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
+                               look_behind = FALSE;
+                       } else {
+                               look_ahead = FALSE;
+                       }
 
                        *io_streaming = 1;
                } else {
-
                        if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
-                               /*
+                               /*
                                 * prime the pump
                                 */
-                               pre_heat_size = min_ph_size;
+                               pre_heat_size = min_ph_size;
                        } else {
                                /*
                                 * Linear growth in PH size: The maximum size is max_length...
-                                * this calculation will result in a size that is neither a
+                                * this calculation will result in a size that is neither a
                                 * power of 2 nor a multiple of PAGE_SIZE... so round
                                 * it up to the nearest PAGE_SIZE boundary
                                 */
                                pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
 
-                               if (pre_heat_size < min_ph_size)
+                               if (pre_heat_size < min_ph_size) {
                                        pre_heat_size = min_ph_size;
-                               else
+                               } else {
                                        pre_heat_size = round_page(pre_heat_size);
+                               }
                        }
                }
                break;
 
        case VM_BEHAVIOR_RANDOM:
-               if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
-                       goto out;
-               break;
+               if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
+                       goto out;
+               }
+               break;
 
        case VM_BEHAVIOR_SEQUENTIAL:
-               if ((pre_heat_size = cluster_size) == 0)
-                       pre_heat_size = sequential_run + PAGE_SIZE;
+               if ((pre_heat_size = cluster_size) == 0) {
+                       pre_heat_size = sequential_run + PAGE_SIZE;
+               }
                look_behind = FALSE;
                *io_streaming = 1;
 
-               break;
+               break;
 
        case VM_BEHAVIOR_RSEQNTL:
-               if ((pre_heat_size = cluster_size) == 0)
-                       pre_heat_size = sequential_run + PAGE_SIZE;
+               if ((pre_heat_size = cluster_size) == 0) {
+                       pre_heat_size = sequential_run + PAGE_SIZE;
+               }
                look_ahead = FALSE;
                *io_streaming = 1;
 
-               break;
-
+               break;
        }
        throttle_limit = (uint32_t) max_length;
        assert(throttle_limit == max_length);
 
        if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
-               if (max_length > throttle_limit)
+               if (max_length > throttle_limit) {
                        max_length = throttle_limit;
+               }
+       }
+       if (pre_heat_size > max_length) {
+               pre_heat_size = max_length;
        }
-       if (pre_heat_size > max_length)
-               pre_heat_size = max_length;
 
        if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
-
                unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
-               
+
                if (consider_free < vm_page_throttle_limit) {
                        pre_heat_size = trunc_page(pre_heat_size / 16);
                } else if (consider_free < vm_page_free_target) {
                        pre_heat_size = trunc_page(pre_heat_size / 4);
                }
-               
-               if (pre_heat_size < min_ph_size)
+
+               if (pre_heat_size < min_ph_size) {
                        pre_heat_size = min_ph_size;
+               }
        }
        if (look_ahead == TRUE) {
-               if (look_behind == TRUE) { 
+               if (look_behind == TRUE) {
                        /*
-                        * if we get here it's due to a random access...
+                        * if we get here it's due to a random access...
                         * so we want to center the original fault address
                         * within the cluster we will issue... make sure
                         * to calculate 'head_size' as a multiple of PAGE_SIZE...
@@ -7675,10 +6846,11 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                         */
                        head_size = trunc_page(pre_heat_size / 2);
 
-                       if (target_start > head_size)
+                       if (target_start > head_size) {
                                target_start -= head_size;
-                       else
+                       } else {
                                target_start = 0;
+                       }
 
                        /*
                         * 'target_start' at this point represents the beginning offset
@@ -7687,47 +6859,50 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                         * due to running into the start of the file
                         */
                }
-               if ((target_start + pre_heat_size) > object_size)
-                       pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
+               if ((target_start + pre_heat_size) > object_size) {
+                       pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
+               }
                /*
                 * at this point calculate the number of pages beyond the original fault
                 * address that we want to consider... this is guaranteed not to extend beyond
                 * the current EOF...
                 */
                assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
-               tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
+               tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
        } else {
-               if (pre_heat_size > target_start) {
+               if (pre_heat_size > target_start) {
                        /*
                         * since pre_heat_size is always smaller than 2^32,
                         * if it is larger than target_start (a 64 bit value)
                         * it is safe to clip target_start to 32 bits
                         */
-                       pre_heat_size = (vm_size_t) target_start;
+                       pre_heat_size = (vm_size_t) target_start;
                }
                tail_size = 0;
        }
        assert( !(target_start & PAGE_MASK_64));
        assert( !(pre_heat_size & PAGE_MASK_64));
 
-       if (pre_heat_size <= PAGE_SIZE)
-               goto out;
+       if (pre_heat_size <= PAGE_SIZE) {
+               goto out;
+       }
 
        if (look_behind == TRUE) {
-               /*
+               /*
                 * take a look at the pages before the original
                 * faulting offset... recalculate this in case
-                * we had to clip 'pre_heat_size' above to keep 
+                * we had to clip 'pre_heat_size' above to keep
                 * from running past the EOF.
                 */
-               head_size = pre_heat_size - tail_size - PAGE_SIZE;
+               head_size = pre_heat_size - tail_size - PAGE_SIZE;
 
-               for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
-                       /*
-                        * don't poke below the lowest offset 
+               for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
+                       /*
+                        * don't poke below the lowest offset
                         */
-                       if (offset < fault_info->lo_offset)
-                               break;
+                       if (offset < fault_info->lo_offset) {
+                               break;
+                       }
                        /*
                         * for external objects or internal objects w/o a pager,
                         * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
@@ -7736,22 +6911,23 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                                break;
                        }
                        if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
-                               /*
+                               /*
                                 * don't bridge resident pages
                                 */
-                               break;
+                               break;
                        }
                        *start = offset;
                        *length += PAGE_SIZE;
                }
        }
        if (look_ahead == TRUE) {
-               for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
-                       /*
-                        * don't poke above the highest offset 
+               for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
+                       /*
+                        * don't poke above the highest offset
                         */
-                       if (offset >= fault_info->hi_offset)
-                               break;
+                       if (offset >= fault_info->hi_offset) {
+                               break;
+                       }
                        assert(offset < object_size);
 
                        /*
@@ -7762,20 +6938,21 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                                break;
                        }
                        if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
-                               /*
+                               /*
                                 * don't bridge resident pages
                                 */
-                               break;
+                               break;
                        }
                        *length += PAGE_SIZE;
                }
        }
 out:
-       if (*length > max_length)
+       if (*length > max_length) {
                *length = max_length;
+       }
 
        vm_object_unlock(object);
-       
+
        DTRACE_VM1(clustersize, vm_size_t, *length);
 }
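
For the common VM_BEHAVIOR_DEFAULT path, the sizing above reduces to a small
amount of arithmetic. A rough userland model of just that branch (illustrative
only; the real function additionally clamps against throttle limits, EOF,
resident pages, and memory pressure):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096u

	/* model of the VM_BEHAVIOR_DEFAULT sizing in vm_object_cluster_size() */
	static uint32_t
	model_pre_heat_size(uint32_t max_length, uint32_t min_ph_size,
	    uint32_t pages_used, uint32_t pages_created)
	{
		uint32_t pre_heat_size;

		if (pages_created < 20u * (min_ph_size / PAGE_SIZE)) {
			pre_heat_size = min_ph_size;     /* "prime the pump" */
		} else {
			/* linear growth toward max_length as the hit ratio rises */
			pre_heat_size = (uint32_t)(((uint64_t)max_length * pages_used) / pages_created);
			if (pre_heat_size < min_ph_size) {
				pre_heat_size = min_ph_size;
			} else {
				/* round up to a PAGE_SIZE boundary, like round_page() */
				pre_heat_size = (pre_heat_size + PAGE_SIZE - 1u) & ~(PAGE_SIZE - 1u);
			}
		}
		return pre_heat_size;
	}

	int
	main(void)
	{
		/* half of the created pages were used, 256KB ceiling, 32KB floor */
		printf("%u\n", model_pre_heat_size(256u * 1024u, 32u * 1024u, 1000u, 2000u));
		/* -> 131072: half of max_length, already page aligned */
		return 0;
	}
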
 
@@ -7787,21 +6964,21 @@ out:
 
 kern_return_t
 vm_object_page_op(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       int                     ops,
-       ppnum_t                 *phys_entry,
-       int                     *flags)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       int                     ops,
+       ppnum_t                 *phys_entry,
+       int                     *flags)
 {
-       vm_page_t               dst_page;
+       vm_page_t               dst_page;
 
        vm_object_lock(object);
 
-       if(ops & UPL_POP_PHYSICAL) {
-               if(object->phys_contiguous) {
+       if (ops & UPL_POP_PHYSICAL) {
+               if (object->phys_contiguous) {
                        if (phys_entry) {
                                *phys_entry = (ppnum_t)
-                                       (object->vo_shadow_offset >> PAGE_SHIFT);
+                                   (object->vo_shadow_offset >> PAGE_SHIFT);
                        }
                        vm_object_unlock(object);
                        return KERN_SUCCESS;
@@ -7810,21 +6987,21 @@ vm_object_page_op(
                        return KERN_INVALID_OBJECT;
                }
        }
-       if(object->phys_contiguous) {
+       if (object->phys_contiguous) {
                vm_object_unlock(object);
                return KERN_INVALID_OBJECT;
        }
 
-       while(TRUE) {
-               if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
+       while (TRUE) {
+               if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
                        vm_object_unlock(object);
                        return KERN_FAILURE;
                }
 
                /* Sync up on getting the busy bit */
-               if((dst_page->busy || dst_page->cleaning) && 
-                          (((ops & UPL_POP_SET) && 
-                          (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
+               if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
+                   (((ops & UPL_POP_SET) &&
+                   (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
                        /* someone else is playing with the page, we will */
                        /* have to wait */
                        PAGE_SLEEP(object, dst_page, THREAD_UNINT);
@@ -7832,29 +7009,40 @@ vm_object_page_op(
                }
 
                if (ops & UPL_POP_DUMP) {
-                       if (dst_page->pmapped == TRUE)
-                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+                       if (dst_page->vmp_pmapped == TRUE) {
+                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+                       }
 
                        VM_PAGE_FREE(dst_page);
                        break;
                }
 
                if (flags) {
-                       *flags = 0;
+                       *flags = 0;
 
                        /* Get the condition of flags before requested ops */
                        /* are undertaken */
 
-                       if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
-                       if(dst_page->free_when_done) *flags |= UPL_POP_PAGEOUT;
-                       if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
-                       if(dst_page->absent) *flags |= UPL_POP_ABSENT;
-                       if(dst_page->busy) *flags |= UPL_POP_BUSY;
+                       if (dst_page->vmp_dirty) {
+                               *flags |= UPL_POP_DIRTY;
+                       }
+                       if (dst_page->vmp_free_when_done) {
+                               *flags |= UPL_POP_PAGEOUT;
+                       }
+                       if (dst_page->vmp_precious) {
+                               *flags |= UPL_POP_PRECIOUS;
+                       }
+                       if (dst_page->vmp_absent) {
+                               *flags |= UPL_POP_ABSENT;
+                       }
+                       if (dst_page->vmp_busy) {
+                               *flags |= UPL_POP_BUSY;
+                       }
                }
 
                /* The caller should have made a call either contingent with */
                /* or prior to this call to set UPL_POP_BUSY */
-               if(ops & UPL_POP_SET) {
+               if (ops & UPL_POP_SET) {
                        /* The protection granted with this assert will */
                        /* not be complete.  If the caller violates the */
                        /* convention and attempts to change page state */
@@ -7862,68 +7050,49 @@ vm_object_page_op(
                        /* because the page may already be busy.  However */
                        /* if such violations occur we will assert sooner */
                        /* or later. */
-                       assert(dst_page->busy || (ops & UPL_POP_BUSY));
+                       assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
                        if (ops & UPL_POP_DIRTY) {
                                SET_PAGE_DIRTY(dst_page, FALSE);
                        }
-                       if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = TRUE;
-                       if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
-                       if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
-                       if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
-               }
-
-               if(ops & UPL_POP_CLR) {
-                       assert(dst_page->busy);
-                       if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
-                       if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = FALSE;
-                       if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
-                       if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
+                       if (ops & UPL_POP_PAGEOUT) {
+                               dst_page->vmp_free_when_done = TRUE;
+                       }
+                       if (ops & UPL_POP_PRECIOUS) {
+                               dst_page->vmp_precious = TRUE;
+                       }
+                       if (ops & UPL_POP_ABSENT) {
+                               dst_page->vmp_absent = TRUE;
+                       }
                        if (ops & UPL_POP_BUSY) {
-                               dst_page->busy = FALSE;
-                               PAGE_WAKEUP(dst_page);
+                               dst_page->vmp_busy = TRUE;
                        }
                }
 
-               if (dst_page->encrypted) {
-                       /*
-                        * ENCRYPTED SWAP:
-                        * We need to decrypt this encrypted page before the
-                        * caller can access its contents.
-                        * But if the caller really wants to access the page's
-                        * contents, they have to keep the page "busy".
-                        * Otherwise, the page could get recycled or re-encrypted
-                        * at any time.
-                        */
-                       if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
-                           dst_page->busy) {
-                               /*
-                                * The page is stable enough to be accessed by
-                                * the caller, so make sure its contents are
-                                * not encrypted.
-                                */
-                               vm_page_decrypt(dst_page, 0);
-                       } else {
-                               /*
-                                * The page is not busy, so don't bother
-                                * decrypting it, since anything could
-                                * happen to it between now and when the
-                                * caller wants to access it.
-                                * We should not give the caller access
-                                * to this page.
-                                */
-                               assert(!phys_entry);
+               if (ops & UPL_POP_CLR) {
+                       assert(dst_page->vmp_busy);
+                       if (ops & UPL_POP_DIRTY) {
+                               dst_page->vmp_dirty = FALSE;
+                       }
+                       if (ops & UPL_POP_PAGEOUT) {
+                               dst_page->vmp_free_when_done = FALSE;
+                       }
+                       if (ops & UPL_POP_PRECIOUS) {
+                               dst_page->vmp_precious = FALSE;
+                       }
+                       if (ops & UPL_POP_ABSENT) {
+                               dst_page->vmp_absent = FALSE;
+                       }
+                       if (ops & UPL_POP_BUSY) {
+                               dst_page->vmp_busy = FALSE;
+                               PAGE_WAKEUP(dst_page);
                        }
                }
-
                if (phys_entry) {
                        /*
                         * The physical page number will remain valid
                         * only if the page is kept busy.
-                        * ENCRYPTED SWAP: make sure we don't let the
-                        * caller access an encrypted page.
                         */
-                       assert(dst_page->busy);
-                       assert(!dst_page->encrypted);
+                       assert(dst_page->vmp_busy);
                        *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
                }
 
@@ -7932,40 +7101,39 @@ vm_object_page_op(
 
        vm_object_unlock(object);
        return KERN_SUCCESS;
-                               
 }
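
A sketch of how a kernel-internal caller might drive these ops: set the busy
bit while retrieving the physical page number, then clear it when done. The
wrapper name and error handling are hypothetical; only the vm_object_page_op()
calls and UPL_POP_* flags come from the code above:

	static kern_return_t
	example_grab_phys(vm_object_t object, vm_object_offset_t offset, ppnum_t *ppnum)
	{
		kern_return_t kr;
		int flags;

		/* mark the page busy and fetch its physical page number together */
		kr = vm_object_page_op(object, offset,
		    UPL_POP_SET | UPL_POP_BUSY, ppnum, &flags);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* ... *ppnum stays valid only while the page is kept busy ... */

		/* drop the busy bit and wake anyone sleeping on the page */
		return vm_object_page_op(object, offset,
		    UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
	}
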
 
 /*
- * vm_object_range_op offers a performance enhancement over
- * vm_object_page_op for page_op functions which do not require page 
- * level state to be returned from the call.  Page_op was created to provide 
- * a low-cost alternative to page manipulation via UPLs when only a single 
- * page was involved.  The range_op call establishes the ability in the _op 
+ * vm_object_range_op offers a performance enhancement over
+ * vm_object_page_op for page_op functions which do not require page
+ * level state to be returned from the call.  Page_op was created to provide
+ * a low-cost alternative to page manipulation via UPLs when only a single
+ * page was involved.  The range_op call establishes the ability in the _op
  * family of functions to work on multiple pages where the lack of page level
  * state handling allows the caller to avoid the overhead of the upl structures.
  */
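
A hedged usage sketch of the interface described above: measuring the run of
resident pages from a starting offset without constructing a UPL. The wrapper
name is hypothetical; UPL_ROP_PRESENT makes the scan stop at the first
non-resident page and report the length covered so far through "range":

	static uint32_t
	example_resident_run(vm_object_t object, vm_object_offset_t beg,
	    vm_object_offset_t end)
	{
		uint32_t range = 0;

		/* stops at the first non-resident page in [beg, end) */
		(void) vm_object_range_op(object, beg, end, UPL_ROP_PRESENT, &range);
		return range;
	}
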
 
 kern_return_t
 vm_object_range_op(
-       vm_object_t             object,
-       vm_object_offset_t      offset_beg,
-       vm_object_offset_t      offset_end,
+       vm_object_t             object,
+       vm_object_offset_t      offset_beg,
+       vm_object_offset_t      offset_end,
        int                     ops,
-       uint32_t                *range)
+       uint32_t                *range)
 {
-        vm_object_offset_t     offset;
-       vm_page_t               dst_page;
+       vm_object_offset_t      offset;
+       vm_page_t               dst_page;
 
        if (offset_end - offset_beg > (uint32_t) -1) {
                /* range is too big and would overflow "*range" */
                return KERN_INVALID_ARGUMENT;
-       } 
+       }
        if (object->resident_page_count == 0) {
-               if (range) {
-                       if (ops & UPL_ROP_PRESENT) {
-                               *range = 0;
+               if (range) {
+                       if (ops & UPL_ROP_PRESENT) {
+                               *range = 0;
                        } else {
-                               *range = (uint32_t) (offset_end - offset_beg);
+                               *range = (uint32_t) (offset_end - offset_beg);
                                assert(*range == (offset_end - offset_beg));
                        }
                }
@@ -7975,21 +7143,21 @@ vm_object_range_op(
 
        if (object->phys_contiguous) {
                vm_object_unlock(object);
-               return KERN_INVALID_OBJECT;
+               return KERN_INVALID_OBJECT;
        }
-       
+
        offset = offset_beg & ~PAGE_MASK_64;
 
        while (offset < offset_end) {
                dst_page = vm_page_lookup(object, offset);
                if (dst_page != VM_PAGE_NULL) {
                        if (ops & UPL_ROP_DUMP) {
-                               if (dst_page->busy || dst_page->cleaning) {
+                               if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
                                        /*
-                                        * someone else is playing with the 
+                                        * someone else is playing with the
                                         * page, we will have to wait
                                         */
-                                       PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+                                       PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                                        /*
                                         * need to look the page up again since its
                                         * state may have changed while we slept
@@ -7998,29 +7166,32 @@ vm_object_range_op(
                                         */
                                        continue;
                                }
-                               if (dst_page->laundry)
+                               if (dst_page->vmp_laundry) {
                                        vm_pageout_steal_laundry(dst_page, FALSE);
+                               }
 
-                               if (dst_page->pmapped == TRUE)
-                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+                               if (dst_page->vmp_pmapped == TRUE) {
+                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+                               }
 
                                VM_PAGE_FREE(dst_page);
-
                        } else if ((ops & UPL_ROP_ABSENT)
-                                          && (!dst_page->absent || dst_page->busy)) {
+                           && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
                                break;
                        }
-               } else if (ops & UPL_ROP_PRESENT)
-                       break;
+               } else if (ops & UPL_ROP_PRESENT) {
+                       break;
+               }
 
                offset += PAGE_SIZE;
        }
        vm_object_unlock(object);
 
        if (range) {
-               if (offset > offset_end)
-                       offset = offset_end;
-               if(offset > offset_beg) {
+               if (offset > offset_end) {
+                       offset = offset_end;
+               }
+               if (offset > offset_beg) {
                        *range = (uint32_t) (offset - offset_beg);
                        assert(*range == (offset - offset_beg));
                } else {
@@ -8035,11 +7206,12 @@ vm_object_range_op(
  *   with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
  *   expect that the virtual address will denote the start of a range that is physically contiguous.
  */
-kern_return_t pager_map_to_phys_contiguous(
-       memory_object_control_t object,
-       memory_object_offset_t  offset,
-       addr64_t                base_vaddr,
-       vm_size_t               size)
+kern_return_t
+pager_map_to_phys_contiguous(
+       memory_object_control_t object,
+       memory_object_offset_t  offset,
+       addr64_t                base_vaddr,
+       vm_size_t               size)
 {
        ppnum_t page_num;
        boolean_t clobbered_private;
@@ -8085,11 +7257,12 @@ uint32_t scan_object_collision = 0;
 void
 vm_object_lock(vm_object_t object)
 {
-        if (object == vm_pageout_scan_wants_object) {
-               scan_object_collision++;
-               mutex_pause(2);
+       if (object == vm_pageout_scan_wants_object) {
+               scan_object_collision++;
+               mutex_pause(2);
        }
-        lck_rw_lock_exclusive(&object->Lock);
+       DTRACE_VM(vm_object_lock_w);
+       lck_rw_lock_exclusive(&object->Lock);
 #if DEVELOPMENT || DEBUG
        object->Lock_owner = current_thread();
 #endif
@@ -8098,8 +7271,8 @@ vm_object_lock(vm_object_t object)
 boolean_t
 vm_object_lock_avoid(vm_object_t object)
 {
-        if (object == vm_pageout_scan_wants_object) {
-               scan_object_collision++;
+       if (object == vm_pageout_scan_wants_object) {
+               scan_object_collision++;
                return TRUE;
        }
        return FALSE;
@@ -8108,14 +7281,16 @@ vm_object_lock_avoid(vm_object_t object)
 boolean_t
 _vm_object_lock_try(vm_object_t object)
 {
-       boolean_t       retval;
+       boolean_t       retval;
 
        retval = lck_rw_try_lock_exclusive(&object->Lock);
 #if DEVELOPMENT || DEBUG
-       if (retval == TRUE)
+       if (retval == TRUE) {
+               DTRACE_VM(vm_object_lock_w);
                object->Lock_owner = current_thread();
+       }
 #endif
-       return (retval);
+       return retval;
 }
 
 boolean_t
@@ -8124,7 +7299,7 @@ vm_object_lock_try(vm_object_t object)
        /*
         * Called from hibernate path so check before blocking.
         */
-       if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level()==0) {
+       if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
                mutex_pause(2);
        }
        return _vm_object_lock_try(object);
@@ -8133,31 +7308,58 @@ vm_object_lock_try(vm_object_t object)
 void
 vm_object_lock_shared(vm_object_t object)
 {
-        if (vm_object_lock_avoid(object)) {
-               mutex_pause(2);
+       if (vm_object_lock_avoid(object)) {
+               mutex_pause(2);
        }
+       DTRACE_VM(vm_object_lock_r);
        lck_rw_lock_shared(&object->Lock);
 }
 
+boolean_t
+vm_object_lock_yield_shared(vm_object_t object)
+{
+       boolean_t retval = FALSE, force_yield = FALSE;
+
+       vm_object_lock_assert_shared(object);
+
+       force_yield = vm_object_lock_avoid(object);
+
+       retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
+       if (retval) {
+               DTRACE_VM(vm_object_lock_yield);
+       }
+
+       return retval;
+}
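
A sketch of the call pattern this new primitive is built for: a long scan that
holds the shared object lock offers to drop and retake it when there is
contention, and treats a TRUE return as a signal that any cached state is
stale. The function name and chunking are illustrative:

	static void
	example_scan(vm_object_t object, int chunks)
	{
		vm_object_lock_shared(object);

		while (chunks-- > 0) {
			/* ... examine one bounded chunk under the shared lock ... */

			if (vm_object_lock_yield_shared(object)) {
				/*
				 * The lock was dropped and reacquired: a real
				 * scan would revalidate before continuing.
				 */
			}
		}
		vm_object_unlock(object);
	}
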
+
 boolean_t
 vm_object_lock_try_shared(vm_object_t object)
 {
-        if (vm_object_lock_avoid(object)) {
-               mutex_pause(2);
+       boolean_t retval;
+
+       if (vm_object_lock_avoid(object)) {
+               mutex_pause(2);
+       }
+       retval = lck_rw_try_lock_shared(&object->Lock);
+       if (retval) {
+               DTRACE_VM(vm_object_lock_r);
        }
-       return (lck_rw_try_lock_shared(&object->Lock));
+       return retval;
 }
 
 boolean_t
 vm_object_lock_upgrade(vm_object_t object)
-{      boolean_t       retval;
+{
+       boolean_t       retval;
 
        retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
 #if DEVELOPMENT || DEBUG
-       if (retval == TRUE)
+       if (retval == TRUE) {
+               DTRACE_VM(vm_object_lock_w);
                object->Lock_owner = current_thread();
+       }
 #endif
-       return (retval);
+       return retval;
 }
 
 void
@@ -8165,9 +7367,11 @@ vm_object_unlock(vm_object_t object)
 {
 #if DEVELOPMENT || DEBUG
        if (object->Lock_owner) {
-               if (object->Lock_owner != current_thread())
+               if (object->Lock_owner != current_thread()) {
                        panic("vm_object_unlock: not owner - %p\n", object);
+               }
                object->Lock_owner = 0;
+               DTRACE_VM(vm_object_unlock);
        }
 #endif
        lck_rw_done(&object->Lock);
@@ -8188,15 +7392,16 @@ vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
 
        vm_object_paging_wait(object, THREAD_UNINT);
 
-       vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
-
-               if (!p->fictitious)
+       vm_page_queue_iterate(&object->memq, p, vmp_listq) {
+               if (!p->vmp_fictitious) {
                        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
+               }
        }
-       if (wimg_mode == VM_WIMG_USE_DEFAULT)
+       if (wimg_mode == VM_WIMG_USE_DEFAULT) {
                object->set_cache_attr = FALSE;
-       else
+       } else {
                object->set_cache_attr = TRUE;
+       }
 
        object->wimg_bits = wimg_mode;
 
@@ -8218,9 +7423,9 @@ extern char *freezer_compressor_scratch_buf;
 extern int c_freezer_compression_count;
 extern AbsoluteTime c_freezer_last_yield_ts;
 
-#define        MAX_FREE_BATCH  32
-#define FREEZER_DUTY_CYCLE_ON_MS       5
-#define FREEZER_DUTY_CYCLE_OFF_MS      5
+#define MAX_FREE_BATCH  32
+#define FREEZER_DUTY_CYCLE_ON_MS        5
+#define FREEZER_DUTY_CYCLE_OFF_MS       5
 
 static int c_freezer_should_yield(void);
 
@@ -8228,8 +7433,8 @@ static int c_freezer_should_yield(void);
 static int
 c_freezer_should_yield()
 {
-       AbsoluteTime    cur_time;
-       uint64_t        nsecs;
+       AbsoluteTime    cur_time;
+       uint64_t        nsecs;
 
        assert(c_freezer_last_yield_ts);
        clock_get_uptime(&cur_time);
@@ -8237,9 +7442,10 @@ c_freezer_should_yield()
        SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
        absolutetime_to_nanoseconds(cur_time, &nsecs);
 
-       if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS)
-               return (1);
-       return (0);
+       if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
+               return 1;
+       }
+       return 0;
 }
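
Together with the constants above, this yields a roughly 50% duty cycle: once
about FREEZER_DUTY_CYCLE_ON_MS (5 ms) has elapsed since the last stamp, the
caller sleeps for FREEZER_DUTY_CYCLE_OFF_MS (5 ms) and restamps. A sketch of
the step the pageout loop below performs (the wrapper name is illustrative):

	static void
	example_duty_cycle_step(void)
	{
		if (c_freezer_should_yield()) {
			/* sleep for the OFF half of the cycle ... */
			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
			/* ... then open a fresh ON window */
			clock_get_uptime(&c_freezer_last_yield_ts);
		}
	}
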
 
 
@@ -8250,39 +7456,39 @@ vm_object_compressed_freezer_done()
 }
 
 
-void
+uint32_t
 vm_object_compressed_freezer_pageout(
-       vm_object_t object)
+       vm_object_t object, uint32_t dirty_budget)
 {
-       vm_page_t                       p;
-       vm_page_t                       local_freeq = NULL;
-       int                             local_freed = 0;
-       kern_return_t                   retval = KERN_SUCCESS;
-       int                             obj_resident_page_count_snapshot = 0;
+       vm_page_t                       p;
+       vm_page_t                       local_freeq = NULL;
+       int                             local_freed = 0;
+       kern_return_t                   retval = KERN_SUCCESS;
+       int                             obj_resident_page_count_snapshot = 0;
+       uint32_t                        paged_out_count = 0;
 
        assert(object != VM_OBJECT_NULL);
        assert(object->internal);
 
        vm_object_lock(object);
 
-       if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL)  {
-                       
+       if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
                if (!object->pager_initialized) {
-
                        vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
 
-                       if (!object->pager_initialized)
+                       if (!object->pager_initialized) {
                                vm_object_compressor_pager_create(object);
+                       }
                }
 
-               if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL)  {
+               if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
                        vm_object_unlock(object);
-                       return;
+                       return paged_out_count;
                }
        }
-                       
+
        if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
-               vm_object_offset_t      curr_offset = 0;
+               vm_object_offset_t      curr_offset = 0;
 
                /*
                 * Go through the object and make sure that any
@@ -8290,16 +7496,17 @@ vm_object_compressed_freezer_pageout(
                 * a compressed segment associated with our "freezer_chead".
                 */
                while (curr_offset < object->vo_size) {
-
                        curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
-       
-                       if (curr_offset == (vm_object_offset_t) -1)
+
+                       if (curr_offset == (vm_object_offset_t) -1) {
                                break;
+                       }
 
                        retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead);
 
-                       if (retval != KERN_SUCCESS)
+                       if (retval != KERN_SUCCESS) {
                                break;
+                       }
 
                        curr_offset += PAGE_SIZE_64;
                }
@@ -8322,30 +7529,28 @@ vm_object_compressed_freezer_pageout(
 
        vm_object_activity_begin(object);
 
-       while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq)) {
-
+       while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
                p = (vm_page_t)vm_page_queue_first(&object->memq);
 
                KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
 
                vm_page_lockspin_queues();
 
-               if (p->cleaning || p->fictitious || p->busy || p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) {
-
+               if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) {
                        vm_page_unlock_queues();
 
                        KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
 
-                       vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
-                       vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
+                       vm_page_queue_remove(&object->memq, p, vmp_listq);
+                       vm_page_queue_enter(&object->memq, p, vmp_listq);
 
                        continue;
                }
 
-               if (p->pmapped == TRUE) {
+               if (p->vmp_pmapped == TRUE) {
                        int refmod_state, pmap_flags;
 
-                       if (p->dirty || p->precious) {
+                       if (p->vmp_dirty || p->vmp_precious) {
                                pmap_flags = PMAP_OPTIONS_COMPRESSOR;
                        } else {
                                pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
@@ -8356,8 +7561,8 @@ vm_object_compressed_freezer_pageout(
                                SET_PAGE_DIRTY(p, FALSE);
                        }
                }
-               
-               if (p->dirty == FALSE && p->precious == FALSE) {
+
+               if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
                        /*
                         * Clean and non-precious page.
                         */
@@ -8368,8 +7573,9 @@ vm_object_compressed_freezer_pageout(
                        continue;
                }
 
-               if (p->laundry)
+               if (p->vmp_laundry) {
                        vm_pageout_steal_laundry(p, TRUE);
+               }
 
                vm_page_queues_remove(p, TRUE);
 
@@ -8382,36 +7588,34 @@ vm_object_compressed_freezer_pageout(
                 * Make the move here while we have the object lock held.
                 */
 
-               vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
-               vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
+               vm_page_queue_remove(&object->memq, p, vmp_listq);
+               vm_page_queue_enter(&object->memq, p, vmp_listq);
 
                /*
                 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
                 *
                 * Mark the page busy so no one messes with it while we have the object lock dropped.
                 */
-
-               p->busy = TRUE;
+               p->vmp_busy = TRUE;
 
                vm_object_activity_begin(object);
 
                vm_object_unlock(object);
 
-               /*
-                * arg3 == FALSE  tells vm_pageout_compress_page that we don't hold the object lock and the pager may not be initialized.
-                */
-               if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p, FALSE) == KERN_SUCCESS) {
+               if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p) == KERN_SUCCESS) {
                        /*
                         * page has already been un-tabled from the object via 'vm_page_remove'
                         */
-                       p->snext = local_freeq;
+                       p->vmp_snext = local_freeq;
                        local_freeq = p;
                        local_freed++;
+                       paged_out_count++;
 
                        if (local_freed >= MAX_FREE_BATCH) {
-               
+                               OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
                                vm_page_free_list(local_freeq, TRUE);
-                               
+
                                local_freeq = NULL;
                                local_freed = 0;
                        }
@@ -8420,7 +7624,6 @@ vm_object_compressed_freezer_pageout(
                KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
 
                if (local_freed == 0 && c_freezer_should_yield()) {
-
                        thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
                        clock_get_uptime(&c_freezer_last_yield_ts);
                }
@@ -8429,21 +7632,23 @@ vm_object_compressed_freezer_pageout(
        }
 
        if (local_freeq) {
+               OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
                vm_page_free_list(local_freeq, TRUE);
-                               
+
                local_freeq = NULL;
                local_freed = 0;
        }
-       
+
        vm_object_activity_end(object);
 
        vm_object_unlock(object);
 
        if (c_freezer_should_yield()) {
-
                thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
                clock_get_uptime(&c_freezer_last_yield_ts);
        }
+       return paged_out_count;
 }
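
The new uint32_t return value lets a caller spread one dirty-page budget across
several objects. A sketch of such a caller; the object array stands in for a
task's real object list and is an assumption, not kernel API:

	static void
	example_freeze_objects(vm_object_t *objects, int nobjects,
	    uint32_t dirty_budget)
	{
		for (int i = 0; i < nobjects && dirty_budget > 0; i++) {
			/*
			 * Each call compresses at most dirty_budget pages and
			 * reports how many it actually paged out.
			 */
			dirty_budget -= vm_object_compressed_freezer_pageout(
				objects[i], dirty_budget);
		}
	}
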
 
 #endif /* CONFIG_FREEZE */
@@ -8453,17 +7658,17 @@ void
 vm_object_pageout(
        vm_object_t object)
 {
-       vm_page_t                       p, next;
-       struct  vm_pageout_queue        *iq;
-       boolean_t                       need_unlock = TRUE;
+       vm_page_t                       p, next;
+       struct  vm_pageout_queue        *iq;
 
-       if (!VM_CONFIG_COMPRESSOR_IS_PRESENT)
+       if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
                return;
+       }
 
        iq = &vm_pageout_queue_internal;
-       
+
        assert(object != VM_OBJECT_NULL );
-       
+
        vm_object_lock(object);
 
        if (!object->internal ||
@@ -8473,80 +7678,74 @@ vm_object_pageout(
                return;
        }
 
-       if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL)  {
-                       
+       if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
                if (!object->pager_initialized) {
-
                        vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
 
-                       if (!object->pager_initialized)
+                       if (!object->pager_initialized) {
                                vm_object_compressor_pager_create(object);
+                       }
                }
 
-               if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL)  {
+               if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
                        vm_object_unlock(object);
                        return;
                }
        }
-                       
-ReScan:        
+
+ReScan:
        next = (vm_page_t)vm_page_queue_first(&object->memq);
 
        while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
                p = next;
-               next = (vm_page_t)vm_page_queue_next(&next->listq);
-               
-               assert(p->vm_page_q_state != VM_PAGE_ON_FREE_Q);
-               
-               if ((p->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) ||
-                   p->encrypted_cleaning ||
-                   p->cleaning ||
-                   p->laundry ||
-                   p->busy ||
-                   p->absent ||
-                   p->error ||
-                   p->fictitious ||
+               next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
+
+               assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
+
+               if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
+                   p->vmp_cleaning ||
+                   p->vmp_laundry ||
+                   p->vmp_busy ||
+                   p->vmp_absent ||
+                   p->vmp_error ||
+                   p->vmp_fictitious ||
                    VM_PAGE_WIRED(p)) {
                        /*
                         * Page is already being cleaned or can't be cleaned.
                         */
                        continue;
                }
+               if (vm_compressor_low_on_space()) {
+                       break;
+               }
 
                /* Throw to the pageout queue */
 
                vm_page_lockspin_queues();
-               need_unlock = TRUE;
-
-               if (vm_compressor_low_on_space()) {
-                       vm_page_unlock_queues();
-                       break;          
-               }
 
                if (VM_PAGE_Q_THROTTLED(iq)) {
-                                       
                        iq->pgo_draining = TRUE;
-                                       
+
                        assert_wait((event_t) (&iq->pgo_laundry + 1),
-                                   THREAD_INTERRUPTIBLE);
+                           THREAD_INTERRUPTIBLE);
                        vm_page_unlock_queues();
                        vm_object_unlock(object);
-                                       
+
                        thread_block(THREAD_CONTINUE_NULL);
 
                        vm_object_lock(object);
                        goto ReScan;
                }
 
-               assert(!p->fictitious);
-               assert(!p->busy);
-               assert(!p->absent);
-               assert(!p->unusual);
-               assert(!p->error);
+               assert(!p->vmp_fictitious);
+               assert(!p->vmp_busy);
+               assert(!p->vmp_absent);
+               assert(!p->vmp_unusual);
+               assert(!p->vmp_error);
                assert(!VM_PAGE_WIRED(p));
-               assert(!p->cleaning);
+               assert(!p->vmp_cleaning);
 
-               if (p->pmapped == TRUE) {
+               if (p->vmp_pmapped == TRUE) {
                        int refmod_state;
                        int pmap_options;
 
@@ -8555,8 +7754,8 @@ ReScan:
                         * for as "compressed" if it's been modified.
                         */
                        pmap_options =
-                               PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
-                       if (p->dirty || p->precious) {
+                           PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+                       if (p->vmp_dirty || p->vmp_precious) {
                                /*
                                 * We already know it's been modified,
                                 * so tell pmap to account for it
@@ -8565,28 +7764,24 @@ ReScan:
                                pmap_options = PMAP_OPTIONS_COMPRESSOR;
                        }
                        refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
-                                                              pmap_options,
-                                                              NULL);
+                           pmap_options,
+                           NULL);
                        if (refmod_state & VM_MEM_MODIFIED) {
                                SET_PAGE_DIRTY(p, FALSE);
                        }
                }
 
-               if (!p->dirty && !p->precious) {
+               if (!p->vmp_dirty && !p->vmp_precious) {
                        vm_page_unlock_queues();
                        VM_PAGE_FREE(p);
                        continue;
                }
-
                vm_page_queues_remove(p, TRUE);
 
-               if (vm_pageout_cluster(p, FALSE, TRUE))
-                       need_unlock = FALSE;
+               vm_pageout_cluster(p);
 
-               if (need_unlock == TRUE)
-                       vm_page_unlock_queues();
+               vm_page_unlock_queues();
        }
-
        vm_object_unlock(object);
 }
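
[Editorial sketch] The loop above parks itself when the pageout queue is throttled: it registers a wait on the laundry counter, drops both the page-queues and object locks, blocks, and restarts the scan from the head because the page list may have changed while it slept. A minimal user-space analogue of that wait-then-rescan shape, using pthreads (all names below are hypothetical, not kernel APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
	bool in_flight;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;   /* object lock analogue */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;  /* page-queues lock analogue */
static pthread_cond_t  queue_drained = PTHREAD_COND_INITIALIZER;
static struct work_item *work_list;
static int queue_depth;
static const int queue_limit = 64;

static void
scan_and_submit(void)
{
	struct work_item *item, *next;

	pthread_mutex_lock(&list_lock);
rescan:
	for (item = work_list; item != NULL; item = next) {
		next = item->next;
		if (item->in_flight) {
			continue;       /* already queued; skip it */
		}
		pthread_mutex_lock(&queue_lock);
		if (queue_depth >= queue_limit) {
			/*
			 * Throttled: drop every lock before blocking,
			 * then rescan from the head since the list may
			 * have changed while we slept.
			 */
			pthread_mutex_unlock(&list_lock);
			while (queue_depth >= queue_limit) {
				pthread_cond_wait(&queue_drained, &queue_lock);
			}
			pthread_mutex_unlock(&queue_lock);
			pthread_mutex_lock(&list_lock);
			goto rescan;
		}
		item->in_flight = true;
		queue_depth++;          /* a consumer decrements and signals queue_drained */
		pthread_mutex_unlock(&queue_lock);
	}
	pthread_mutex_unlock(&list_lock);
}
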
 
@@ -8595,20 +7790,22 @@ ReScan:
 void
 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
 {
-       io_reprioritize_req_t   req;
-       struct vnode            *devvp = NULL;  
+       io_reprioritize_req_t   req;
+       struct vnode            *devvp = NULL;
 
-       if(vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS)
+       if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
                return;
-       
+       }
+
        /*
         * Create the request for I/O reprioritization.
         * We use the noblock variant of zalloc because we're holding the object
         * lock here and we could cause a deadlock in low memory conditions.
         */
        req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
-       if (req == NULL)
+       if (req == NULL) {
                return;
+       }
        req->blkno = blkno;
        req->len = len;
        req->priority = prio;
@@ -8620,38 +7817,40 @@ vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int pr
        IO_REPRIORITIZE_LIST_UNLOCK();
 
        /* Wakeup reprioritize thread */
-       IO_REPRIO_THREAD_WAKEUP();      
+       IO_REPRIO_THREAD_WAKEUP();
 
-       return;         
-}      
+       return;
+}
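
The comment above states a rule worth isolating: with the object lock held, the allocation must not sleep, and because reprioritization is only advisory it is safe to simply drop the request when no memory is available. A self-contained sketch of that best-effort pattern (the fixed pool is a hypothetical stand-in for zalloc_noblock(); it is not thread-safe, a concern the kernel's zone allocator handles internally):

#include <stddef.h>
#include <stdint.h>

#define POOL_SIZE 32

struct reprio_req {
	uint64_t blkno;
	uint32_t len;
	int      priority;
	int      in_use;
};

static struct reprio_req req_pool[POOL_SIZE];

/* Returns NULL instead of sleeping when no element is free. */
static struct reprio_req *
pool_alloc_noblock(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		if (!req_pool[i].in_use) {
			req_pool[i].in_use = 1;
			return &req_pool[i];
		}
	}
	return NULL;
}

/* Best-effort: an allocation failure drops the advisory request
 * rather than blocking while a lock is held. */
static void
request_reprioritize(uint64_t blkno, uint32_t len, int prio)
{
	struct reprio_req *req = pool_alloc_noblock();

	if (req == NULL) {
		return;         /* low memory: skip the optimization */
	}
	req->blkno = blkno;
	req->len = len;
	req->priority = prio;
	/* ... enqueue req and wake the worker thread ... */
}
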
 
 void
 vm_decmp_upl_reprioritize(upl_t upl, int prio)
 {
        int offset;
        vm_object_t object;
-       io_reprioritize_req_t   req;
+       io_reprioritize_req_t   req;
        struct vnode            *devvp = NULL;
-       uint64_t                blkno;
-       uint32_t                len;
-       upl_t                   io_upl;
-       uint64_t                *io_upl_reprio_info;
-       int                     io_upl_size;
+       uint64_t                blkno;
+       uint32_t                len;
+       upl_t                   io_upl;
+       uint64_t                *io_upl_reprio_info;
+       int                     io_upl_size;
 
-       if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
+       if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
                return;
+       }
 
-       /* 
-        * We dont want to perform any allocations with the upl lock held since that might 
-        * result in a deadlock. If the system is low on memory, the pageout thread would 
+       /*
+        * We don't want to perform any allocations with the upl lock held since that might
+        * result in a deadlock. If the system is low on memory, the pageout thread would
         * try to page out memory and might wait on this lock. If we are waiting for the memory to
         * be freed up by the pageout thread, it would be a deadlock.
         */
 
 
        /* First, get the size of the upl so we know how big the reprio info needs to be */
-       if(!upl_try_lock(upl))
+       if (!upl_try_lock(upl)) {
                return;
+       }
 
        if (upl->decmp_io_upl == NULL) {
                /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
@@ -8663,15 +7862,17 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio)
        assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
        io_upl_size = io_upl->size;
        upl_unlock(upl);
-       
+
        /* Now perform the allocation */
        io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
-       if (io_upl_reprio_info == NULL)
+       if (io_upl_reprio_info == NULL) {
                return;
+       }
 
        /* Now again take the lock, recheck the state and grab out the required info */
-       if(!upl_try_lock(upl))
+       if (!upl_try_lock(upl)) {
                goto out;
+       }
 
        if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
                /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
@@ -8688,8 +7889,8 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio)
        }
 
        /* Get the dev vnode ptr for this object */
-       if(!object || !object->pager || 
-          vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
+       if (!object || !object->pager ||
+           vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
                upl_unlock(upl);
                goto out;
        }
@@ -8700,29 +7901,29 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio)
 
        offset = 0;
        while (offset < io_upl_size) {
-               blkno   = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK;
-               len     = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;   
+               blkno   = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK;
+               len     = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
 
                /*
-                * This implementation may cause some spurious expedites due to the 
-                * fact that we dont cleanup the blkno & len from the upl_reprio_info 
-                * even after the I/O is complete. 
+                * This implementation may cause some spurious expedites because we
+                * don't clean up the blkno & len in upl_reprio_info even after
+                * the I/O is complete.
                 */
-               
+
                if (blkno != 0 && len != 0) {
                        /* Create the request for I/O reprioritization */
-                               req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
-                       assert(req != NULL);
-                       req->blkno = blkno;
-                       req->len = len;
-                       req->priority = prio;
-                       req->devvp = devvp;
-
-                       /* Insert request into the reprioritization list */
-                       IO_REPRIORITIZE_LIST_LOCK();
-                       queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
-                       IO_REPRIORITIZE_LIST_UNLOCK();          
-                       
+                       req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
+                       assert(req != NULL);
+                       req->blkno = blkno;
+                       req->len = len;
+                       req->priority = prio;
+                       req->devvp = devvp;
+
+                       /* Insert request into the reprioritization list */
+                       IO_REPRIORITIZE_LIST_LOCK();
+                       queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
+                       IO_REPRIORITIZE_LIST_UNLOCK();
+
                        offset += len;
                } else {
                        offset += PAGE_SIZE;
@@ -8730,7 +7931,7 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio)
        }
 
        /* Wakeup reprioritize thread */
-        IO_REPRIO_THREAD_WAKEUP();
+       IO_REPRIO_THREAD_WAKEUP();
 
 out:
        kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
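
vm_decmp_upl_reprioritize() above demonstrates a recurring shape: take the lock just long enough to read a size, drop it to do a blocking allocation, then re-take the lock and revalidate that the object still matches the snapshot before using it. A compact user-space sketch of that unlock-allocate-relock-revalidate pattern (types and names hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct tracked {
	pthread_mutex_t lock;
	size_t size;
	unsigned generation;    /* bumped whenever the object mutates */
};

/* Returns a buffer sized for 't', or NULL if the object changed or is
 * contended; the caller frees the buffer. */
static char *
snapshot_alloc(struct tracked *t, size_t *lenp)
{
	size_t len;
	unsigned gen;
	char *buf;

	if (pthread_mutex_trylock(&t->lock) != 0) {
		return NULL;
	}
	len = t->size;
	gen = t->generation;
	pthread_mutex_unlock(&t->lock);

	buf = malloc(len);      /* may block: do it with the lock dropped */
	if (buf == NULL) {
		return NULL;
	}

	if (pthread_mutex_trylock(&t->lock) != 0) {
		free(buf);
		return NULL;
	}
	if (t->generation != gen || t->size != len) {
		/* The object changed while unlocked: bail out cleanly. */
		pthread_mutex_unlock(&t->lock);
		free(buf);
		return NULL;
	}
	/* ... copy whatever state the buffer was allocated for ... */
	pthread_mutex_unlock(&t->lock);
	*lenp = len;
	return buf;
}
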
@@ -8741,61 +7942,66 @@ void
 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
 {
        upl_t upl;
-        upl_page_info_t *pl;
-        unsigned int i, num_pages;
-        int cur_tier;
+       upl_page_info_t *pl;
+       unsigned int i, num_pages;
+       int cur_tier;
 
        cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
 
-       /* 
-       Scan through all UPLs associated with the object to find the 
-       UPL containing the contended page.
-       */ 
+       /*
+        *  Scan through all UPLs associated with the object to find the
+        *  UPL containing the contended page.
+        */
        queue_iterate(&o->uplq, upl, upl_t, uplq) {
-               if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier)
+               if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
                        continue;
+               }
                pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
-                num_pages = (upl->size / PAGE_SIZE);
-                
+               num_pages = (upl->size / PAGE_SIZE);
+
                /*
-               For each page in the UPL page list, see if it matches the contended
-               page and was issued as a low prio I/O. 
-               */
-               for(i=0; i < num_pages; i++) {
-                       if(UPL_PAGE_PRESENT(pl,i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
+                *  For each page in the UPL page list, see if it matches the contended
+                *  page and was issued as a low prio I/O.
+                */
+               for (i = 0; i < num_pages; i++) {
+                       if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
                                if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
-                                       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl, upl->upl_priority, 0);
+                                       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
+                                           VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
                                        vm_decmp_upl_reprioritize(upl, cur_tier);
                                        break;
                                }
-                               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl->upl_reprio_info[i], upl->upl_priority, 0);
-                               if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) 
+                               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
+                                   upl->upl_reprio_info[i], upl->upl_priority, 0);
+                               if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
                                        vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
-                                break;
-                         }
-                }
-                /* Check if we found any hits */
-                 if (i != num_pages)
+                               }
+                               break;
+                       }
+               }
+               /* Check if we found any hits */
+               if (i != num_pages) {
                        break;
+               }
        }
-       
+
        return;
-}      
+}
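
The upl_reprio_info[] entries consulted above pack a block number and a length into a single uint64_t via UPL_REPRIO_INFO_MASK and UPL_REPRIO_INFO_SHIFT. A self-contained sketch of that encoding, assuming 32-bit fields (an assumption; the real macro values live in the UPL headers):

#include <stdint.h>

#define REPRIO_INFO_MASK        ((1ULL << 32) - 1)      /* low 32 bits (assumed width) */
#define REPRIO_INFO_SHIFT       32

static inline uint64_t
reprio_pack(uint64_t blkno, uint32_t len)
{
	return (blkno & REPRIO_INFO_MASK) |
	       ((uint64_t)len << REPRIO_INFO_SHIFT);
}

static inline void
reprio_unpack(uint64_t info, uint64_t *blkno, uint32_t *len)
{
	*blkno = info & REPRIO_INFO_MASK;
	*len = (uint32_t)((info >> REPRIO_INFO_SHIFT) & REPRIO_INFO_MASK);
}

The unpack half mirrors the blkno/len extraction in the reprioritization loop above; the pack half is the inverse the I/O side would perform when recording a request.
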
 
 wait_result_t
 vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
 {
        wait_result_t ret;
 
-       KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);       
-       
-       if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) {
-               /* 
-               Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
-               */
-               vm_page_handle_prio_inversion(o,m);
+       KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
+
+       if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
+               /*
+                *  The page is busy due to an I/O; issue a reprioritize request if necessary.
+                */
+               vm_page_handle_prio_inversion(o, m);
        }
-       m->wanted = TRUE;
+       m->vmp_wanted = TRUE;
        ret = thread_sleep_vm_object(o, m, interruptible);
        KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
        return ret;
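
vm_page_sleep() sets vmp_wanted before sleeping so that whoever later clears the busy condition knows a wakeup is actually needed. A minimal pthreads rendering of that wanted-flag handshake (struct and function names hypothetical; the caller holds the object lock throughout):

#include <pthread.h>
#include <stdbool.h>

struct page {
	pthread_mutex_t *obj_lock;      /* pages are protected by their object's lock */
	pthread_cond_t  cv;
	bool busy;
	bool wanted;
};

/* Sleep until 'busy' clears; the caller holds *obj_lock. */
static void
page_sleep(struct page *p)
{
	while (p->busy) {
		p->wanted = true;       /* tell the waker someone cares */
		pthread_cond_wait(&p->cv, p->obj_lock);
	}
}

/* Clear 'busy'; wake waiters only if somebody set 'wanted'. The
 * caller holds *obj_lock. */
static void
page_wakeup_done(struct page *p)
{
	p->busy = false;
	if (p->wanted) {
		p->wanted = false;
		pthread_cond_broadcast(&p->cv);
	}
}
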
@@ -8805,22 +8011,603 @@ static void
 io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
 {
        io_reprioritize_req_t   req = NULL;
-       
-       while(1) {
 
+       while (1) {
                IO_REPRIORITIZE_LIST_LOCK();
                if (queue_empty(&io_reprioritize_list)) {
                        IO_REPRIORITIZE_LIST_UNLOCK();
                        break;
                }
-                       
-               queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);   
+
+               queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
                IO_REPRIORITIZE_LIST_UNLOCK();
-               
+
                vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
-               zfree(io_reprioritize_req_zone, req);   
-       }       
-       
+               zfree(io_reprioritize_req_zone, req);
+       }
+
        IO_REPRIO_THREAD_CONTINUATION();
 }
 #endif
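
io_reprioritize_thread() drains the request list and then parks itself with IO_REPRIO_THREAD_CONTINUATION() until the next IO_REPRIO_THREAD_WAKEUP(). The same consumer shape in portable pthreads (hypothetical names; the kernel uses a continuation rather than a blocked thread so no stack is held while idle):

#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *next;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_wake = PTHREAD_COND_INITIALIZER;
static struct req *q_head;

static void
issue(struct req *r)
{
	(void)r;        /* stand-in for vnode_pager_issue_reprioritize_io() */
}

static void *
worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&q_lock);
	for (;;) {
		while (q_head == NULL) {
			pthread_cond_wait(&q_wake, &q_lock);    /* park */
		}
		struct req *r = q_head;
		q_head = r->next;
		pthread_mutex_unlock(&q_lock);

		issue(r);       /* heavy work with the queue lock dropped */

		pthread_mutex_lock(&q_lock);
	}
	/* NOTREACHED */
}

A producer appends to q_head under q_lock and calls pthread_cond_signal(&q_wake), the analogue of IO_REPRIO_THREAD_WAKEUP().
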
+
+#if VM_OBJECT_ACCESS_TRACKING
+void
+vm_object_access_tracking(
+       vm_object_t     object,
+       int             *access_tracking_p,
+       uint32_t        *access_tracking_reads_p,
+       uint32_t        *access_tracking_writes_p)
+{
+       int     access_tracking;
+
+       access_tracking = !!*access_tracking_p;
+
+       vm_object_lock(object);
+       *access_tracking_p = object->access_tracking;
+       if (access_tracking_reads_p) {
+               *access_tracking_reads_p = object->access_tracking_reads;
+       }
+       if (access_tracking_writes_p) {
+               *access_tracking_writes_p = object->access_tracking_writes;
+       }
+       object->access_tracking = access_tracking;
+       object->access_tracking_reads = 0;
+       object->access_tracking_writes = 0;
+       vm_object_unlock(object);
+
+       if (access_tracking) {
+               vm_object_pmap_protect_options(object,
+                   0,
+                   object->vo_size,
+                   PMAP_NULL,
+                   0,
+                   VM_PROT_NONE,
+                   0);
+       }
+}
+#endif /* VM_OBJECT_ACCESS_TRACKING */
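
vm_object_access_tracking() snapshots and resets the read/write counters, then calls vm_object_pmap_protect_options(..., VM_PROT_NONE, ...) so every future access faults and gets counted anew. The same "revoke, then count the next touch" idea can be sketched in user space with mprotect() and a SIGSEGV handler, a trick familiar from garbage collectors. This is an analogue, not the kernel mechanism; it relies on mprotect() being usable from a handler, which holds on mainstream Unixes but is not guaranteed by POSIX, and MAP_ANON may be spelled MAP_ANONYMOUS:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static volatile sig_atomic_t touches;
static char *page;
static size_t pagesz;

static void
on_fault(int sig, siginfo_t *si, void *uc)
{
	(void)sig;
	(void)uc;
	if ((char *)si->si_addr >= page && (char *)si->si_addr < page + pagesz) {
		touches++;
		/* Re-enable access so the faulting instruction can retry. */
		mprotect(page, pagesz, PROT_READ | PROT_WRITE);
	} else {
		_exit(1);       /* a real crash, not our tracker */
	}
}

int
main(void)
{
	struct sigaction sa = {0};

	sa.sa_sigaction = on_fault;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	pagesz = (size_t)sysconf(_SC_PAGESIZE);
	page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (page == MAP_FAILED) {
		return 1;
	}

	mprotect(page, pagesz, PROT_NONE);      /* "arm" the tracker */
	page[0] = 1;                            /* faults once, then retries */
	printf("touches = %d\n", (int)touches); /* prints: touches = 1 */
	return 0;
}
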
+
+void
+vm_object_ledger_tag_ledgers(
+       vm_object_t     object,
+       int             *ledger_idx_volatile,
+       int             *ledger_idx_nonvolatile,
+       int             *ledger_idx_volatile_compressed,
+       int             *ledger_idx_nonvolatile_compressed,
+       boolean_t       *do_footprint)
+{
+       assert(object->shadow == VM_OBJECT_NULL);
+
+       *do_footprint = !object->vo_no_footprint;
+
+       switch (object->vo_ledger_tag) {
+       case VM_LEDGER_TAG_NONE:
+               /*
+                * Regular purgeable memory:
+                * counts in footprint only when nonvolatile.
+                */
+               *do_footprint = TRUE;
+               assert(object->purgable != VM_PURGABLE_DENY);
+               *ledger_idx_volatile = task_ledgers.purgeable_volatile;
+               *ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
+               *ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
+               *ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
+               break;
+       case VM_LEDGER_TAG_DEFAULT:
+               /*
+                * "default" tagged memory:
+                * counts in footprint only when nonvolatile and not marked
+                * as "no_footprint".
+                */
+               *ledger_idx_volatile = task_ledgers.tagged_nofootprint;
+               *ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
+               if (*do_footprint) {
+                       *ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
+               } else {
+                       *ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
+               }
+               break;
+       case VM_LEDGER_TAG_NETWORK:
+               /*
+                * "network" tagged memory:
+                * never counts in footprint.
+                */
+               *do_footprint = FALSE;
+               *ledger_idx_volatile = task_ledgers.network_volatile;
+               *ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
+               *ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
+               *ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
+               break;
+       case VM_LEDGER_TAG_MEDIA:
+               /*
+                * "media" tagged memory:
+                * counts in footprint only when nonvolatile and not marked
+                * as "no footprint".
+                */
+               *ledger_idx_volatile = task_ledgers.media_nofootprint;
+               *ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
+               if (*do_footprint) {
+                       *ledger_idx_nonvolatile = task_ledgers.media_footprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
+               } else {
+                       *ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
+               }
+               break;
+       case VM_LEDGER_TAG_GRAPHICS:
+               /*
+                * "graphics" tagged memory:
+                * counts in footprint only when nonvolatile and not marked
+                * as "no footprint".
+                */
+               *ledger_idx_volatile = task_ledgers.graphics_nofootprint;
+               *ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
+               if (*do_footprint) {
+                       *ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
+               } else {
+                       *ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
+               }
+               break;
+       case VM_LEDGER_TAG_NEURAL:
+               /*
+                * "neural" tagged memory:
+                * counts in footprint only when nonvolatile and not marked
+                * as "no footprint".
+                */
+               *ledger_idx_volatile = task_ledgers.neural_nofootprint;
+               *ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
+               if (*do_footprint) {
+                       *ledger_idx_nonvolatile = task_ledgers.neural_footprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
+               } else {
+                       *ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
+                       *ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
+               }
+               break;
+       default:
+               panic("%s: object %p has unsupported ledger_tag %d\n",
+                   __FUNCTION__, object, object->vo_ledger_tag);
+       }
+}
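
Every caller of vm_object_ledger_tag_ledgers() then makes the same two choices: the volatile row for VM_PURGABLE_VOLATILE/EMPTY objects versus the nonvolatile row otherwise, and an extra phys_footprint charge only when do_footprint holds and the object is nonvolatile (see the debit/credit paths in vm_object_ownership_change() below). That selection, condensed into a self-contained sketch with hypothetical types:

#include <stdbool.h>

enum purgable_state { NONVOLATILE, VOLATILE, EMPTY };

struct charge {
	int  ledger_idx;            /* for resident pages */
	int  ledger_idx_compressed; /* for compressed pages */
	bool charge_phys_footprint; /* also hits phys_footprint? */
};

static struct charge
pick_charge(enum purgable_state st,
    int idx_vol, int idx_nonvol, int idx_vol_comp, int idx_nonvol_comp,
    bool do_footprint)
{
	struct charge c;
	bool is_volatile = (st == VOLATILE || st == EMPTY);

	c.ledger_idx = is_volatile ? idx_vol : idx_nonvol;
	c.ledger_idx_compressed = is_volatile ? idx_vol_comp : idx_nonvol_comp;
	/*
	 * Volatile (and empty) objects never hit phys_footprint here;
	 * wired pages, which are always nonvolatile, are handled
	 * separately by the caller.
	 */
	c.charge_phys_footprint = do_footprint && !is_volatile;
	return c;
}
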
+
+kern_return_t
+vm_object_ownership_change(
+       vm_object_t     object,
+       int             new_ledger_tag,
+       task_t          new_owner,
+       int             new_ledger_flags,
+       boolean_t       old_task_objq_locked)
+{
+       int             old_ledger_tag;
+       task_t          old_owner;
+       int             resident_count, wired_count;
+       unsigned int    compressed_count;
+       int             ledger_idx_volatile;
+       int             ledger_idx_nonvolatile;
+       int             ledger_idx_volatile_compressed;
+       int             ledger_idx_nonvolatile_compressed;
+       int             ledger_idx;
+       int             ledger_idx_compressed;
+       boolean_t       do_footprint, old_no_footprint, new_no_footprint;
+       boolean_t       new_task_objq_locked;
+
+       vm_object_lock_assert_exclusive(object);
+
+       if (!object->internal) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
+           object->purgable == VM_PURGABLE_DENY) {
+               /* non-purgeable memory must have a valid non-zero ledger tag */
+               return KERN_INVALID_ARGUMENT;
+       }
+       if (new_ledger_tag < 0 ||
+           new_ledger_tag > VM_LEDGER_TAG_MAX) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       if (new_ledger_flags & ~VM_LEDGER_FLAGS) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
+           object->purgable == VM_PURGABLE_DENY) {
+               /*
+                * This VM object is neither ledger-tagged nor purgeable.
+                * We can convert it to "ledger tag" ownership iff it
+                * has not been used at all yet (no resident pages and
+                * no pager) and it's going to be assigned to a valid task.
+                */
+               if (object->resident_page_count != 0 ||
+                   object->pager != NULL ||
+                   object->pager_created ||
+                   object->ref_count != 1 ||
+                   object->vo_owner != TASK_NULL ||
+                   object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
+                   new_owner == TASK_NULL) {
+                       return KERN_FAILURE;
+               }
+       }
+
+       if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
+               new_no_footprint = TRUE;
+       } else {
+               new_no_footprint = FALSE;
+       }
+#if __arm64__
+       if (!new_no_footprint &&
+           object->purgable != VM_PURGABLE_DENY &&
+           new_owner != TASK_NULL &&
+           new_owner != VM_OBJECT_OWNER_DISOWNED &&
+           new_owner->task_legacy_footprint) {
+               /*
+                * This task has been granted "legacy footprint" and should
+                * not be charged for its IOKit purgeable memory.  Since we
+                * might now change the accounting of such memory to the
+                * "graphics" ledger, for example, give it the "no footprint"
+                * option.
+                */
+               new_no_footprint = TRUE;
+       }
+#endif /* __arm64__ */
+       assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+       assert(object->shadow == VM_OBJECT_NULL);
+       assert(object->copy == VM_OBJECT_NULL);
+
+       old_ledger_tag = object->vo_ledger_tag;
+       old_no_footprint = object->vo_no_footprint;
+       old_owner = VM_OBJECT_OWNER(object);
+
+       DTRACE_VM7(object_ownership_change,
+           vm_object_t, object,
+           task_t, old_owner,
+           int, old_ledger_tag,
+           int, old_no_footprint,
+           task_t, new_owner,
+           int, new_ledger_tag,
+           int, new_no_footprint);
+
+       assert(object->internal);
+       resident_count = object->resident_page_count - object->wired_page_count;
+       wired_count = object->wired_page_count;
+       compressed_count = vm_compressor_pager_get_count(object->pager);
+
+       /*
+        * Deal with the old owner and/or ledger tag, if needed.
+        */
+       if (old_owner != TASK_NULL &&
+           ((old_owner != new_owner)           /* new owner ... */
+           ||                                  /* ... or ... */
+           (old_no_footprint != new_no_footprint) /* new "no_footprint" */
+           ||                                  /* ... or ... */
+           old_ledger_tag != new_ledger_tag)) { /* ... new ledger */
+               /*
+                * Take this object off of the old owner's ledgers.
+                */
+               vm_object_ledger_tag_ledgers(object,
+                   &ledger_idx_volatile,
+                   &ledger_idx_nonvolatile,
+                   &ledger_idx_volatile_compressed,
+                   &ledger_idx_nonvolatile_compressed,
+                   &do_footprint);
+               if (object->purgable == VM_PURGABLE_VOLATILE ||
+                   object->purgable == VM_PURGABLE_EMPTY) {
+                       ledger_idx = ledger_idx_volatile;
+                       ledger_idx_compressed = ledger_idx_volatile_compressed;
+               } else {
+                       ledger_idx = ledger_idx_nonvolatile;
+                       ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
+               }
+               if (resident_count) {
+                       /*
+                        * Adjust the appropriate old owner's ledgers by the
+                        * number of resident pages.
+                        */
+                       ledger_debit(old_owner->ledger,
+                           ledger_idx,
+                           ptoa_64(resident_count));
+                       /* adjust old owner's footprint */
+                       if (do_footprint &&
+                           object->purgable != VM_PURGABLE_VOLATILE &&
+                           object->purgable != VM_PURGABLE_EMPTY) {
+                               ledger_debit(old_owner->ledger,
+                                   task_ledgers.phys_footprint,
+                                   ptoa_64(resident_count));
+                       }
+               }
+               if (wired_count) {
+                       /* wired pages are always nonvolatile */
+                       ledger_debit(old_owner->ledger,
+                           ledger_idx_nonvolatile,
+                           ptoa_64(wired_count));
+                       if (do_footprint) {
+                               ledger_debit(old_owner->ledger,
+                                   task_ledgers.phys_footprint,
+                                   ptoa_64(wired_count));
+                       }
+               }
+               if (compressed_count) {
+                       /*
+                        * Adjust the appropriate old owner's ledgers
+                        * by the number of compressed pages.
+                        */
+                       ledger_debit(old_owner->ledger,
+                           ledger_idx_compressed,
+                           ptoa_64(compressed_count));
+                       if (do_footprint &&
+                           object->purgable != VM_PURGABLE_VOLATILE &&
+                           object->purgable != VM_PURGABLE_EMPTY) {
+                               ledger_debit(old_owner->ledger,
+                                   task_ledgers.phys_footprint,
+                                   ptoa_64(compressed_count));
+                       }
+               }
+               if (old_owner != new_owner) {
+                       /* remove object from old_owner's list of owned objects */
+                       DTRACE_VM2(object_owner_remove,
+                           vm_object_t, object,
+                           task_t, old_owner);
+                       if (!old_task_objq_locked) {
+                               task_objq_lock(old_owner);
+                       }
+                       old_owner->task_owned_objects--;
+                       queue_remove(&old_owner->task_objq, object,
+                           vm_object_t, task_objq);
+                       switch (object->purgable) {
+                       case VM_PURGABLE_NONVOLATILE:
+                       case VM_PURGABLE_EMPTY:
+                               vm_purgeable_nonvolatile_owner_update(old_owner,
+                                   -1);
+                               break;
+                       case VM_PURGABLE_VOLATILE:
+                               vm_purgeable_volatile_owner_update(old_owner,
+                                   -1);
+                               break;
+                       default:
+                               break;
+                       }
+                       if (!old_task_objq_locked) {
+                               task_objq_unlock(old_owner);
+                       }
+               }
+       }
+
+       /*
+        * Switch to new ledger tag and/or owner.
+        */
+
+       new_task_objq_locked = FALSE;
+       if (new_owner != old_owner &&
+           new_owner != TASK_NULL &&
+           new_owner != VM_OBJECT_OWNER_DISOWNED) {
+               /*
+                * If the new owner is not accepting new objects ("disowning"),
+                * the object becomes "disowned" and will be added to
+                * the kernel's task_objq.
+                *
+                * Check first without locking, to avoid blocking while the
+                * task is disowning its objects.
+                */
+               if (new_owner->task_objects_disowning) {
+                       new_owner = VM_OBJECT_OWNER_DISOWNED;
+               } else {
+                       task_objq_lock(new_owner);
+                       /* check again now that we have the lock */
+                       if (new_owner->task_objects_disowning) {
+                               new_owner = VM_OBJECT_OWNER_DISOWNED;
+                               task_objq_unlock(new_owner);
+                       } else {
+                               new_task_objq_locked = TRUE;
+                       }
+               }
+       }
+
+       object->vo_ledger_tag = new_ledger_tag;
+       object->vo_owner = new_owner;
+       object->vo_no_footprint = new_no_footprint;
+
+       if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
+               /*
+                * Disowned objects are added to the kernel's task_objq but
+                * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
+                * differentiate them from objects intentionally owned by
+                * the kernel.
+                */
+               assert(old_owner != kernel_task);
+               new_owner = kernel_task;
+               assert(!new_task_objq_locked);
+               task_objq_lock(new_owner);
+               new_task_objq_locked = TRUE;
+       }
+
+       /*
+        * Deal with the new owner and/or ledger tag, if needed.
+        */
+       if (new_owner != TASK_NULL &&
+           ((new_owner != old_owner)           /* new owner ... */
+           ||                                  /* ... or ... */
+           (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
+           ||                                  /* ... or ... */
+           new_ledger_tag != old_ledger_tag)) { /* ... new ledger */
+               /*
+                * Add this object to the new owner's ledgers.
+                */
+               vm_object_ledger_tag_ledgers(object,
+                   &ledger_idx_volatile,
+                   &ledger_idx_nonvolatile,
+                   &ledger_idx_volatile_compressed,
+                   &ledger_idx_nonvolatile_compressed,
+                   &do_footprint);
+               if (object->purgable == VM_PURGABLE_VOLATILE ||
+                   object->purgable == VM_PURGABLE_EMPTY) {
+                       ledger_idx = ledger_idx_volatile;
+                       ledger_idx_compressed = ledger_idx_volatile_compressed;
+               } else {
+                       ledger_idx = ledger_idx_nonvolatile;
+                       ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
+               }
+               if (resident_count) {
+                       /*
+                        * Adjust the appropriate new owner's ledgers by the
+                        * number of resident pages.
+                        */
+                       ledger_credit(new_owner->ledger,
+                           ledger_idx,
+                           ptoa_64(resident_count));
+                       /* adjust new owner's footprint */
+                       if (do_footprint &&
+                           object->purgable != VM_PURGABLE_VOLATILE &&
+                           object->purgable != VM_PURGABLE_EMPTY) {
+                               ledger_credit(new_owner->ledger,
+                                   task_ledgers.phys_footprint,
+                                   ptoa_64(resident_count));
+                       }
+               }
+               if (wired_count) {
+                       /* wired pages are always nonvolatile */
+                       ledger_credit(new_owner->ledger,
+                           ledger_idx_nonvolatile,
+                           ptoa_64(wired_count));
+                       if (do_footprint) {
+                               ledger_credit(new_owner->ledger,
+                                   task_ledgers.phys_footprint,
+                                   ptoa_64(wired_count));
+                       }
+               }
+               if (compressed_count) {
+                       /*
+                        * Adjust the new owner's ledgers by the number of
+                        * compressed pages.
+                        */
+                       ledger_credit(new_owner->ledger,
+                           ledger_idx_compressed,
+                           ptoa_64(compressed_count));
+                       if (do_footprint &&
+                           object->purgable != VM_PURGABLE_VOLATILE &&
+                           object->purgable != VM_PURGABLE_EMPTY) {
+                               ledger_credit(new_owner->ledger,
+                                   task_ledgers.phys_footprint,
+                                   ptoa_64(compressed_count));
+                       }
+               }
+               if (new_owner != old_owner) {
+                       /* add object to new_owner's list of owned objects */
+                       DTRACE_VM2(object_owner_add,
+                           vm_object_t, object,
+                           task_t, new_owner);
+                       assert(new_task_objq_locked);
+                       new_owner->task_owned_objects++;
+                       queue_enter(&new_owner->task_objq, object,
+                           vm_object_t, task_objq);
+                       switch (object->purgable) {
+                       case VM_PURGABLE_NONVOLATILE:
+                       case VM_PURGABLE_EMPTY:
+                               vm_purgeable_nonvolatile_owner_update(new_owner,
+                                   +1);
+                               break;
+                       case VM_PURGABLE_VOLATILE:
+                               vm_purgeable_volatile_owner_update(new_owner,
+                                   +1);
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       if (new_task_objq_locked) {
+               task_objq_unlock(new_owner);
+       }
+
+       return KERN_SUCCESS;
+}
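
Note the shape of the new-owner check above: task_objects_disowning is peeked once without the lock, purely to avoid blocking against a task that is mid-disown, and then rechecked under task_objq_lock before committing. A minimal sketch of that unlocked-peek / locked-recheck idiom (names hypothetical; the unlocked read is benign only because the decision is re-validated under the lock):

#include <pthread.h>
#include <stdbool.h>

struct owner {
	pthread_mutex_t lock;
	bool closing;   /* analogous to task_objects_disowning */
	int  refs;
};

/* Returns true with 'o' locked if we registered; false if the owner
 * is shutting down and must not accept new registrations. */
static bool
try_register(struct owner *o)
{
	if (o->closing) {       /* unlocked peek: cheap fast path */
		return false;
	}
	pthread_mutex_lock(&o->lock);
	if (o->closing) {       /* recheck: the flag may have flipped */
		pthread_mutex_unlock(&o->lock);
		return false;
	}
	o->refs++;              /* safe: we hold the lock */
	return true;            /* caller must unlock when done */
}
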
+
+void
+vm_owned_objects_disown(
+       task_t  task)
+{
+       vm_object_t     next_object;
+       vm_object_t     object;
+       int             collisions;
+       kern_return_t   kr;
+
+       if (task == NULL) {
+               return;
+       }
+
+       collisions = 0;
+
+again:
+       if (task->task_objects_disowned) {
+               /* task has already disowned its owned objects */
+               assert(task->task_volatile_objects == 0);
+               assert(task->task_nonvolatile_objects == 0);
+               assert(task->task_owned_objects == 0);
+               return;
+       }
+
+       task_objq_lock(task);
+
+       task->task_objects_disowning = TRUE;
+
+       for (object = (vm_object_t) queue_first(&task->task_objq);
+           !queue_end(&task->task_objq, (queue_entry_t) object);
+           object = next_object) {
+               if (task->task_nonvolatile_objects == 0 &&
+                   task->task_volatile_objects == 0 &&
+                   task->task_owned_objects == 0) {
+                       /* no more objects owned by "task" */
+                       break;
+               }
+
+               next_object = (vm_object_t) queue_next(&object->task_objq);
+
+#if DEBUG
+               assert(object->vo_purgeable_volatilizer == NULL);
+#endif /* DEBUG */
+               assert(object->vo_owner == task);
+               if (!vm_object_lock_try(object)) {
+                       task_objq_unlock(task);
+                       mutex_pause(collisions++);
+                       goto again;
+               }
+               /* transfer ownership to the kernel */
+               assert(VM_OBJECT_OWNER(object) != kernel_task);
+               kr = vm_object_ownership_change(
+                       object,
+                       object->vo_ledger_tag, /* unchanged */
+                       VM_OBJECT_OWNER_DISOWNED, /* new owner */
+                       0, /* new_ledger_flags */
+                       TRUE);  /* old_owner->task_objq locked */
+               assert(kr == KERN_SUCCESS);
+               assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
+               vm_object_unlock(object);
+       }
+
+       if (__improbable(task->task_volatile_objects != 0 ||
+           task->task_nonvolatile_objects != 0 ||
+           task->task_owned_objects != 0)) {
+               panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
+                   __FUNCTION__,
+                   task,
+                   task->task_volatile_objects,
+                   task->task_nonvolatile_objects,
+                   task->task_owned_objects,
+                   &task->task_objq,
+                   queue_first(&task->task_objq),
+                   queue_last(&task->task_objq));
+       }
+
+       /* there shouldn't be any objects owned by task now */
+       assert(task->task_volatile_objects == 0);
+       assert(task->task_nonvolatile_objects == 0);
+       assert(task->task_owned_objects == 0);
+       assert(task->task_objects_disowning);
+
+       /* and we don't need to try and disown again */
+       task->task_objects_disowned = TRUE;
+
+       task_objq_unlock(task);
+}
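
vm_owned_objects_disown() acquires each object lock with vm_object_lock_try() while already holding task_objq_lock; on failure it drops the queue lock, backs off with mutex_pause(collisions++), and restarts, since insisting on the object lock here could deadlock against a thread that holds it and is waiting for task_objq_lock. The same back-off-and-rescan idiom in pthreads (hypothetical names; sched_yield() stands in for mutex_pause()):

#include <pthread.h>
#include <sched.h>
#include <stddef.h>

struct node {
	struct node *next;
	pthread_mutex_t lock;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void
process_locked(struct node *n)
{
	(void)n;        /* stand-in for the per-object ownership change */
}

static void
drain_all(void)
{
again:
	pthread_mutex_lock(&list_lock);
	for (struct node *n = list_head; n != NULL; n = n->next) {
		if (pthread_mutex_trylock(&n->lock) != 0) {
			/*
			 * The holder of n->lock may be waiting for
			 * list_lock: drop ours, yield, and rescan
			 * instead of deadlocking.
			 */
			pthread_mutex_unlock(&list_lock);
			sched_yield();
			goto again;
		}
		process_locked(n);
		pthread_mutex_unlock(&n->lock);
	}
	pthread_mutex_unlock(&list_lock);
}
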