diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c
index 9502c60ae530acbc70cbec671724f33d24cba019..63dd004ea1de41ae93fe6a1c0a68d764cf75cee8 100644
--- a/osfmk/vm/vm_pageout.c
+++ b/osfmk/vm/vm_pageout.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
  */
 
 #include <stdint.h>
+#include <ptrauth.h>
 
 #include <debug.h>
 #include <mach_pagemap.h>
 #include <mach_cluster_stats.h>
-#include <mach_kdb.h>
-#include <advisory_pageout.h>
 
 #include <mach/mach_types.h>
 #include <mach/memory_object.h>
 #include <kern/host_statistics.h>
 #include <kern/machine.h>
 #include <kern/misc_protos.h>
+#include <kern/sched.h>
 #include <kern/thread.h>
-#include <kern/xpr.h>
 #include <kern/kalloc.h>
+#include <kern/zalloc_internal.h>
+#include <kern/policy_internal.h>
+#include <kern/thread_group.h>
 
 #include <machine/vm_tuning.h>
-
-#if CONFIG_EMBEDDED
-#include <sys/kern_memorystatus.h>
-#endif
+#include <machine/commpage.h>
 
 #include <vm/pmap.h>
+#include <vm/vm_compressor_pager.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_protos.h> /* must be last */
 #include <vm/memory_object.h>
 #include <vm/vm_purgeable_internal.h>
+#include <vm/vm_shared_region.h>
+#include <vm/vm_compressor.h>
 
-/*
- * ENCRYPTED SWAP:
- */
-#include <../bsd/crypto/aes/aes.h>
+#include <san/kasan.h>
 
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
+#endif
 
-#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE   /* maximum iterations of the active queue to move pages to inactive */
-#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE  100
+#if UPL_DEBUG
+#include <libkern/OSDebug.h>
 #endif
 
+extern int cs_debug;
+
+extern void mbuf_drain(boolean_t);
+
+#if VM_PRESSURE_EVENTS
+#if CONFIG_JETSAM
+extern unsigned int memorystatus_available_pages;
+extern unsigned int memorystatus_available_pages_pressure;
+extern unsigned int memorystatus_available_pages_critical;
+#else /* CONFIG_JETSAM */
+extern uint64_t memorystatus_available_pages;
+extern uint64_t memorystatus_available_pages_pressure;
+extern uint64_t memorystatus_available_pages_critical;
+#endif /* CONFIG_JETSAM */
+
+extern unsigned int memorystatus_frozen_count;
+extern unsigned int memorystatus_suspended_count;
+extern vm_pressure_level_t memorystatus_vm_pressure_level;
+
+extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
+extern uint32_t memorystatus_jetsam_fg_band_waiters;
+
+void vm_pressure_response(void);
+extern void consider_vm_pressure_events(void);
+
+#define MEMORYSTATUS_SUSPENDED_THRESHOLD  4
+#endif /* VM_PRESSURE_EVENTS */
+
+thread_t  vm_pageout_scan_thread = THREAD_NULL;
+boolean_t vps_dynamic_priority_enabled = FALSE;
+
 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
-#ifdef CONFIG_EMBEDDED
+#ifdef  CONFIG_EMBEDDED
 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
 #else
 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
 #endif
 
 #ifndef VM_PAGEOUT_DEADLOCK_RELIEF
-#define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
+#define VM_PAGEOUT_DEADLOCK_RELIEF 100  /* number of pages to move to break deadlock */
 #endif
 
-#ifndef VM_PAGEOUT_INACTIVE_RELIEF
-#define VM_PAGEOUT_INACTIVE_RELIEF 50  /* minimum number of pages to move to the inactive q */
-#endif
+#ifndef VM_PAGE_LAUNDRY_MAX
+#define VM_PAGE_LAUNDRY_MAX     128UL   /* maximum pageouts on a given pageout queue */
+#endif  /* VM_PAGE_LAUNDRY_MAX */
+
+#ifndef VM_PAGEOUT_BURST_WAIT
+#define VM_PAGEOUT_BURST_WAIT   1       /* milliseconds */
+#endif  /* VM_PAGEOUT_BURST_WAIT */
 
-#ifndef        VM_PAGE_LAUNDRY_MAX
-#define        VM_PAGE_LAUNDRY_MAX     16UL    /* maximum pageouts on a given pageout queue */
-#endif /* VM_PAGEOUT_LAUNDRY_MAX */
+#ifndef VM_PAGEOUT_EMPTY_WAIT
+#define VM_PAGEOUT_EMPTY_WAIT   50      /* milliseconds */
+#endif  /* VM_PAGEOUT_EMPTY_WAIT */
 
-#ifndef        VM_PAGEOUT_BURST_WAIT
-#define        VM_PAGEOUT_BURST_WAIT   30      /* milliseconds per page */
-#endif /* VM_PAGEOUT_BURST_WAIT */
+#ifndef VM_PAGEOUT_DEADLOCK_WAIT
+#define VM_PAGEOUT_DEADLOCK_WAIT 100    /* milliseconds */
+#endif  /* VM_PAGEOUT_DEADLOCK_WAIT */
 
-#ifndef        VM_PAGEOUT_EMPTY_WAIT
-#define VM_PAGEOUT_EMPTY_WAIT  200     /* milliseconds */
-#endif /* VM_PAGEOUT_EMPTY_WAIT */
+#ifndef VM_PAGEOUT_IDLE_WAIT
+#define VM_PAGEOUT_IDLE_WAIT    10      /* milliseconds */
+#endif  /* VM_PAGEOUT_IDLE_WAIT */
 
-#ifndef        VM_PAGEOUT_DEADLOCK_WAIT
-#define VM_PAGEOUT_DEADLOCK_WAIT       300     /* milliseconds */
-#endif /* VM_PAGEOUT_DEADLOCK_WAIT */
+#ifndef VM_PAGEOUT_SWAP_WAIT
+#define VM_PAGEOUT_SWAP_WAIT    10      /* milliseconds */
+#endif  /* VM_PAGEOUT_SWAP_WAIT */
 
-#ifndef        VM_PAGEOUT_IDLE_WAIT
-#define VM_PAGEOUT_IDLE_WAIT   10      /* milliseconds */
-#endif /* VM_PAGEOUT_IDLE_WAIT */
 
 #ifndef VM_PAGE_SPECULATIVE_TARGET
-#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / 20)
+#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
 #endif /* VM_PAGE_SPECULATIVE_TARGET */
 
-#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
-#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
-#endif /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
-
 
 /*
  *     To obtain a reasonable LRU approximation, the inactive queue
  *     then the pageout daemon starts running.
  */
 
-#ifndef        VM_PAGE_INACTIVE_TARGET
-#define        VM_PAGE_INACTIVE_TARGET(avail)  ((avail) * 1 / 3)
-#endif /* VM_PAGE_INACTIVE_TARGET */
+#ifndef VM_PAGE_INACTIVE_TARGET
+#define VM_PAGE_INACTIVE_TARGET(avail)  ((avail) * 1 / 2)
+#endif  /* VM_PAGE_INACTIVE_TARGET */
 
 /*
  *     Once the pageout daemon starts running, it keeps going
  *     until vm_page_free_count meets or exceeds vm_page_free_target.
  */
 
-#ifndef        VM_PAGE_FREE_TARGET
-#ifdef CONFIG_EMBEDDED
-#define        VM_PAGE_FREE_TARGET(free)       (15 + (free) / 100)
+#ifndef VM_PAGE_FREE_TARGET
+#ifdef  CONFIG_EMBEDDED
+#define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 100)
 #else
-#define        VM_PAGE_FREE_TARGET(free)       (15 + (free) / 80)
+#define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 80)
 #endif
-#endif /* VM_PAGE_FREE_TARGET */
+#endif  /* VM_PAGE_FREE_TARGET */
+
 
 /*
  *     The pageout daemon always starts running once vm_page_free_count
  *     falls below vm_page_free_min.
  */
 
-#ifndef        VM_PAGE_FREE_MIN
-#ifdef CONFIG_EMBEDDED
-#define        VM_PAGE_FREE_MIN(free)          (10 + (free) / 200)
+#ifndef VM_PAGE_FREE_MIN
+#ifdef  CONFIG_EMBEDDED
+#define VM_PAGE_FREE_MIN(free)          (10 + (free) / 200)
 #else
-#define        VM_PAGE_FREE_MIN(free)          (10 + (free) / 100)
+#define VM_PAGE_FREE_MIN(free)          (10 + (free) / 100)
 #endif
-#endif /* VM_PAGE_FREE_MIN */
-
-#define VM_PAGE_FREE_MIN_LIMIT         1500
-#define VM_PAGE_FREE_TARGET_LIMIT      2000
+#endif  /* VM_PAGE_FREE_MIN */
 
+#ifdef  CONFIG_EMBEDDED
+#define VM_PAGE_FREE_RESERVED_LIMIT     100
+#define VM_PAGE_FREE_MIN_LIMIT          1500
+#define VM_PAGE_FREE_TARGET_LIMIT       2000
+#else
+#define VM_PAGE_FREE_RESERVED_LIMIT     1700
+#define VM_PAGE_FREE_MIN_LIMIT          3500
+#define VM_PAGE_FREE_TARGET_LIMIT       4000
+#endif
 
 /*
  *     When vm_page_free_count falls below vm_page_free_reserved,
  *     operation by dipping into the reserved pool of pages.
  */
 
-#ifndef        VM_PAGE_FREE_RESERVED
-#define        VM_PAGE_FREE_RESERVED(n)        \
-       ((6 * VM_PAGE_LAUNDRY_MAX) + (n))
-#endif /* VM_PAGE_FREE_RESERVED */
+#ifndef VM_PAGE_FREE_RESERVED
+#define VM_PAGE_FREE_RESERVED(n)        \
+       ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
+#endif  /* VM_PAGE_FREE_RESERVED */
 
 /*
  *     When we dequeue pages from the inactive list, they are
  *     we will make per call of vm_pageout_scan().
  */
 #define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
-#ifndef        VM_PAGE_REACTIVATE_LIMIT
-#ifdef CONFIG_EMBEDDED
-#define        VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
+
+#ifndef VM_PAGE_REACTIVATE_LIMIT
+#ifdef  CONFIG_EMBEDDED
+#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
 #else
-#define        VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
+#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
 #endif
-#endif /* VM_PAGE_REACTIVATE_LIMIT */
-#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM      100
+#endif  /* VM_PAGE_REACTIVATE_LIMIT */
+#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM       1000
 
+extern boolean_t hibernate_cleaning_in_progress;
 
 /*
- * must hold the page queues lock to
- * manipulate this structure
+ * Forward declarations for internal routines.
  */
-struct vm_pageout_queue {
-        queue_head_t   pgo_pending;    /* laundry pages to be processed by pager's iothread */
-        unsigned int   pgo_laundry;    /* current count of laundry pages on queue or in flight */
-        unsigned int   pgo_maxlaundry;
-
-        unsigned int   pgo_idle:1,     /* iothread is blocked waiting for work to do */
-                       pgo_busy:1,     /* iothread is currently processing request from pgo_pending */
-                       pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
-                       :0;
+struct cq {
+       struct vm_pageout_queue *q;
+       void                    *current_chead;
+       char                    *scratch_buf;
+       int                     id;
 };
 
-#define VM_PAGE_Q_THROTTLED(q)         \
-        ((q)->pgo_laundry >= (q)->pgo_maxlaundry)
+struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
 
 
-/*
- * Exported variable used to broadcast the activation of the pageout scan
- * Working Set uses this to throttle its use of pmap removes.  In this
- * way, code which runs within memory in an uncontested context does
- * not keep encountering soft faults.
- */
+#if VM_PRESSURE_EVENTS
+void vm_pressure_thread(void);
 
-unsigned int   vm_pageout_scan_event_counter = 0;
+boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
+boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);
 
-/*
- * Forward declarations for internal routines.
- */
+boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
+boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
+#endif
 
-static void vm_pageout_garbage_collect(int);
-static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
+void vm_pageout_garbage_collect(int);
 static void vm_pageout_iothread_external(void);
-static void vm_pageout_iothread_internal(void);
-static void vm_pageout_queue_steal(vm_page_t);
+static void vm_pageout_iothread_internal(struct cq *cq);
+static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);
 
 extern void vm_pageout_continue(void);
 extern void vm_pageout_scan(void);
 
-static thread_t        vm_pageout_external_iothread = THREAD_NULL;
-static thread_t        vm_pageout_internal_iothread = THREAD_NULL;
+boolean_t vm_pageout_running = FALSE;
 
-unsigned int vm_pageout_reserved_internal = 0;
-unsigned int vm_pageout_reserved_really = 0;
+uint32_t vm_page_upl_tainted = 0;
+uint32_t vm_page_iopl_tainted = 0;
 
-unsigned int vm_pageout_idle_wait = 0;         /* milliseconds */
-unsigned int vm_pageout_empty_wait = 0;                /* milliseconds */
-unsigned int vm_pageout_burst_wait = 0;                /* milliseconds */
-unsigned int vm_pageout_deadlock_wait = 0;     /* milliseconds */
-unsigned int vm_pageout_deadlock_relief = 0;
-unsigned int vm_pageout_inactive_relief = 0;
-unsigned int vm_pageout_burst_active_throttle = 0;
-unsigned int vm_pageout_burst_inactive_throttle = 0;
+#if !CONFIG_EMBEDDED
+static boolean_t vm_pageout_waiter  = FALSE;
+#endif /* !CONFIG_EMBEDDED */
 
-/*
- *     Protection against zero fill flushing live working sets derived
- *     from existing backing store and files
- */
-unsigned int vm_accellerate_zf_pageout_trigger = 400;
-unsigned int zf_queue_min_count = 100;
-unsigned int vm_zf_count = 0;
-unsigned int vm_zf_queue_count = 0;
 
-/*
- *     These variables record the pageout daemon's actions:
- *     how many pages it looks at and what happens to those pages.
- *     No locking needed because only one thread modifies the variables.
- */
+#if DEVELOPMENT || DEBUG
+struct vm_pageout_debug vm_pageout_debug;
+#endif
+struct vm_pageout_vminfo vm_pageout_vminfo;
+struct vm_pageout_state  vm_pageout_state;
+struct vm_config         vm_config;
 
-unsigned int vm_pageout_active = 0;            /* debugging */
-unsigned int vm_pageout_inactive = 0;          /* debugging */
-unsigned int vm_pageout_inactive_throttled = 0;        /* debugging */
-unsigned int vm_pageout_inactive_forced = 0;   /* debugging */
-unsigned int vm_pageout_inactive_nolock = 0;   /* debugging */
-unsigned int vm_pageout_inactive_avoid = 0;    /* debugging */
-unsigned int vm_pageout_inactive_busy = 0;     /* debugging */
-unsigned int vm_pageout_inactive_absent = 0;   /* debugging */
-unsigned int vm_pageout_inactive_used = 0;     /* debugging */
-unsigned int vm_pageout_inactive_clean = 0;    /* debugging */
-unsigned int vm_pageout_inactive_dirty = 0;    /* debugging */
-unsigned int vm_pageout_dirty_no_pager = 0;    /* debugging */
-unsigned int vm_pageout_purged_objects = 0;    /* debugging */
-unsigned int vm_stat_discard = 0;              /* debugging */
-unsigned int vm_stat_discard_sent = 0;         /* debugging */
-unsigned int vm_stat_discard_failure = 0;      /* debugging */
-unsigned int vm_stat_discard_throttle = 0;     /* debugging */
-unsigned int vm_pageout_reactivation_limit_exceeded = 0;       /* debugging */
-unsigned int vm_pageout_catch_ups = 0;                         /* debugging */
-unsigned int vm_pageout_inactive_force_reclaim = 0;    /* debugging */
-
-unsigned int vm_pageout_scan_active_throttled = 0;
-unsigned int vm_pageout_scan_inactive_throttled = 0;
-unsigned int vm_pageout_scan_throttle = 0;                     /* debugging */
-unsigned int vm_pageout_scan_burst_throttle = 0;               /* debugging */
-unsigned int vm_pageout_scan_empty_throttle = 0;               /* debugging */
-unsigned int vm_pageout_scan_deadlock_detected = 0;            /* debugging */
-unsigned int vm_pageout_scan_active_throttle_success = 0;      /* debugging */
-unsigned int vm_pageout_scan_inactive_throttle_success = 0;    /* debugging */
-/*
- * Backing store throttle when BS is exhausted
- */
-unsigned int   vm_backing_store_low = 0;
+struct  vm_pageout_queue vm_pageout_queue_internal VM_PAGE_PACKED_ALIGNED;
+struct  vm_pageout_queue vm_pageout_queue_external VM_PAGE_PACKED_ALIGNED;
 
-unsigned int vm_pageout_out_of_line  = 0;
-unsigned int vm_pageout_in_place  = 0;
+int         vm_upl_wait_for_pages = 0;
+vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
-/*
- * ENCRYPTED SWAP:
- * counters and statistics...
- */
-unsigned long vm_page_decrypt_counter = 0;
-unsigned long vm_page_decrypt_for_upl_counter = 0;
-unsigned long vm_page_encrypt_counter = 0;
-unsigned long vm_page_encrypt_abort_counter = 0;
-unsigned long vm_page_encrypt_already_encrypted_counter = 0;
-boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
+boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;
 
-struct vm_pageout_queue vm_pageout_queue_internal;
-struct vm_pageout_queue vm_pageout_queue_external;
+int     vm_debug_events = 0;
 
-unsigned int vm_page_speculative_target = 0;
+LCK_GRP_DECLARE(vm_pageout_lck_grp, "vm_pageout");
 
-vm_object_t    vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+#if CONFIG_MEMORYSTATUS
+extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
 
-unsigned long vm_cs_validated_resets = 0;
+uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
+uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
 
-/*
- *     Routine:        vm_backing_store_disable
- *     Purpose:
- *             Suspend non-privileged threads wishing to extend
- *             backing store when we are low on backing store
- *             (Synchronized by caller)
- */
-void
-vm_backing_store_disable(
-       boolean_t       disable)
-{
-       if(disable) {
-               vm_backing_store_low = 1;
-       } else {
-               if(vm_backing_store_low) {
-                       vm_backing_store_low = 0;
-                       thread_wakeup((event_t) &vm_backing_store_low);
-               }
-       }
-}
+#endif
+
+#if __AMP__
+int vm_compressor_ebound = 1;
+int vm_pgo_pbound = 0;
+extern void thread_bind_cluster_type(thread_t, char, bool);
+#endif /* __AMP__ */
 
 
-#if MACH_CLUSTER_STATS
-unsigned long vm_pageout_cluster_dirtied = 0;
-unsigned long vm_pageout_cluster_cleaned = 0;
-unsigned long vm_pageout_cluster_collisions = 0;
-unsigned long vm_pageout_cluster_clusters = 0;
-unsigned long vm_pageout_cluster_conversions = 0;
-unsigned long vm_pageout_target_collisions = 0;
-unsigned long vm_pageout_target_page_dirtied = 0;
-unsigned long vm_pageout_target_page_freed = 0;
-#define CLUSTER_STAT(clause)   clause
-#else  /* MACH_CLUSTER_STATS */
-#define CLUSTER_STAT(clause)
-#endif /* MACH_CLUSTER_STATS */
-
-/* 
+/*
  *     Routine:        vm_pageout_object_terminate
  *     Purpose:
  *             Destroy the pageout_object, and perform all of the
  *             required cleanup actions.
- * 
+ *
  *     In/Out conditions:
  *             The object must be locked, and will be returned locked.
  */
 void
 vm_pageout_object_terminate(
-       vm_object_t     object)
+       vm_object_t     object)
 {
-       vm_object_t     shadow_object;
+       vm_object_t     shadow_object;
 
        /*
         * Deal with the deallocation (last reference) of a pageout object
@@ -429,43 +379,38 @@ vm_pageout_object_terminate(
        shadow_object = object->shadow;
        vm_object_lock(shadow_object);
 
-       while (!queue_empty(&object->memq)) {
-               vm_page_t               p, m;
-               vm_object_offset_t      offset;
+       while (!vm_page_queue_empty(&object->memq)) {
+               vm_page_t               p, m;
+               vm_object_offset_t      offset;
 
-               p = (vm_page_t) queue_first(&object->memq);
+               p = (vm_page_t) vm_page_queue_first(&object->memq);
 
-               assert(p->private);
-               assert(p->pageout);
-               p->pageout = FALSE;
-               assert(!p->cleaning);
+               assert(p->vmp_private);
+               assert(p->vmp_free_when_done);
+               p->vmp_free_when_done = FALSE;
+               assert(!p->vmp_cleaning);
+               assert(!p->vmp_laundry);
 
-               offset = p->offset;
+               offset = p->vmp_offset;
                VM_PAGE_FREE(p);
                p = VM_PAGE_NULL;
 
                m = vm_page_lookup(shadow_object,
-                       offset + object->shadow_offset);
+                   offset + object->vo_shadow_offset);
 
-               if(m == VM_PAGE_NULL)
+               if (m == VM_PAGE_NULL) {
                        continue;
-               assert(m->cleaning);
-               /* used as a trigger on upl_commit etc to recognize the */
-               /* pageout daemon's subseqent desire to pageout a cleaning */
-               /* page.  When the bit is on the upl commit code will   */
-               /* respect the pageout bit in the target page over the  */
-               /* caller's page list indication */
-               m->dump_cleaning = FALSE;
+               }
 
-               assert((m->dirty) || (m->precious) ||
-                               (m->busy && m->cleaning));
+               assert((m->vmp_dirty) || (m->vmp_precious) ||
+                   (m->vmp_busy && m->vmp_cleaning));
 
                /*
                 * Handle the trusted pager throttle.
                 * Also decrement the burst throttle (if external).
                 */
                vm_page_lock_queues();
-               if (m->laundry) {
+               if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
                        vm_pageout_throttle_up(m);
                }
 
@@ -477,15 +422,12 @@ vm_pageout_object_terminate(
                 * pages may have been modified between the selection as an
                 * adjacent page and conversion to a target.
                 */
-               if (m->pageout) {
-                       assert(m->busy);
-                       assert(m->wire_count == 1);
-                       m->cleaning = FALSE;
-                       m->encrypted_cleaning = FALSE;
-                       m->pageout = FALSE;
-#if MACH_CLUSTER_STATS
-                       if (m->wanted) vm_pageout_target_collisions++;
-#endif
+               if (m->vmp_free_when_done) {
+                       assert(m->vmp_busy);
+                       assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
+                       assert(m->vmp_wire_count == 1);
+                       m->vmp_cleaning = FALSE;
+                       m->vmp_free_when_done = FALSE;
                        /*
                         * Revoke all access to the page. Since the object is
                         * locked, and the page is busy, this prevents the page
@@ -496,19 +438,18 @@ vm_pageout_object_terminate(
                         * can detect whether the page was redirtied during
                         * pageout by checking the modify state.
                         */
-                       if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
-                             m->dirty = TRUE;
-                       else
-                             m->dirty = FALSE;
-
-                       if (m->dirty) {
-                               CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
-                               vm_page_unwire(m);/* reactivates */
+                       if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
+                               SET_PAGE_DIRTY(m, FALSE);
+                       } else {
+                               m->vmp_dirty = FALSE;
+                       }
+
+                       if (m->vmp_dirty) {
+                               vm_page_unwire(m, TRUE);        /* reactivates */
                                VM_STAT_INCR(reactivations);
                                PAGE_WAKEUP_DONE(m);
                        } else {
-                               CLUSTER_STAT(vm_pageout_target_page_freed++;)
-                               vm_page_free(m);/* clears busy, etc. */
+                               vm_page_free(m);  /* clears busy, etc. */
                        }
                        vm_page_unlock_queues();
                        continue;
@@ -519,57 +460,45 @@ vm_pageout_object_terminate(
                 * If prep_pin_count is nonzero, then someone is using the
                 * page, so make it active.
                 */
-               if (!m->active && !m->inactive && !m->throttled && !m->private) {
-                       if (m->reference)
+               if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
+                       if (m->vmp_reference) {
                                vm_page_activate(m);
-                       else
+                       } else {
                                vm_page_deactivate(m);
+                       }
                }
-               if((m->busy) && (m->cleaning)) {
-
-                       /* the request_page_list case, (COPY_OUT_FROM FALSE) */
-                       m->busy = FALSE;
-
-                       /* We do not re-set m->dirty ! */
-                       /* The page was busy so no extraneous activity     */
-                       /* could have occurred. COPY_INTO is a read into the */
-                       /* new pages. CLEAN_IN_PLACE does actually write   */
-                       /* out the pages but handling outside of this code */
-                       /* will take care of resetting dirty. We clear the */
-                       /* modify however for the Programmed I/O case.     */ 
-                       pmap_clear_modify(m->phys_page);
-
-                       m->absent = FALSE;
-                       m->overwriting = FALSE;
-               } else if (m->overwriting) {
-                       /* alternate request page list, write to page_list */
-                       /* case.  Occurs when the original page was wired  */
-                       /* at the time of the list request */
-                       assert(m->wire_count != 0);
-                       vm_page_unwire(m);/* reactivates */
-                       m->overwriting = FALSE;
-               } else {
-               /*
-                * Set the dirty state according to whether or not the page was
-                * modified during the pageout. Note that we purposefully do
-                * NOT call pmap_clear_modify since the page is still mapped.
-                * If the page were to be dirtied between the 2 calls, this
-                * this fact would be lost. This code is only necessary to
-                * maintain statistics, since the pmap module is always
-                * consulted if m->dirty is false.
-                */
-#if MACH_CLUSTER_STATS
-                       m->dirty = pmap_is_modified(m->phys_page);
+               if (m->vmp_overwriting) {
+                       /*
+                        * the (COPY_OUT_FROM == FALSE) request_page_list case
+                        */
+                       if (m->vmp_busy) {
+                               /*
+                                * We do not re-set m->vmp_dirty !
+                                * The page was busy so no extraneous activity
+                                * could have occurred. COPY_INTO is a read into the
+                                * new pages. CLEAN_IN_PLACE does actually write
+                                * out the pages but handling outside of this code
+                                * will take care of resetting dirty. We clear the
+                                * modify however for the Programmed I/O case.
+                                */
+                               pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
 
-                       if (m->dirty)   vm_pageout_cluster_dirtied++;
-                       else            vm_pageout_cluster_cleaned++;
-                       if (m->wanted)  vm_pageout_cluster_collisions++;
-#else
-                       m->dirty = 0;
-#endif
+                               m->vmp_busy = FALSE;
+                               m->vmp_absent = FALSE;
+                       } else {
+                               /*
+                                * alternate (COPY_OUT_FROM == FALSE) request_page_list case
+                                * Occurs when the original page was wired
+                                * at the time of the list request
+                                */
+                               assert(VM_PAGE_WIRED(m));
+                               vm_page_unwire(m, TRUE);        /* reactivates */
+                       }
+                       m->vmp_overwriting = FALSE;
+               } else {
+                       m->vmp_dirty = FALSE;
                }
-               m->cleaning = FALSE;
-               m->encrypted_cleaning = FALSE;
+               m->vmp_cleaning = FALSE;
 
                /*
                 * Wakeup any thread waiting for the page to be un-cleaning.
@@ -580,11 +509,12 @@ vm_pageout_object_terminate(
        /*
         * Account for the paging reference taken in vm_paging_object_allocate.
         */
-       vm_object_paging_end(shadow_object);
+       vm_object_activity_end(shadow_object);
        vm_object_unlock(shadow_object);
 
        assert(object->ref_count == 0);
        assert(object->paging_in_progress == 0);
+       assert(object->activity_in_progress == 0);
        assert(object->resident_page_count == 0);
        return;
 }
@@ -596,51 +526,49 @@ vm_pageout_object_terminate(
  *             necessarily flushed from the VM page cache.
  *             This is accomplished by cleaning in place.
  *
- *             The page must not be busy, and the object and page
- *             queues must be locked.
- *             
+ *             The page must not be busy, and new_object
+ *             must be locked.
+ *
  */
-void
+static void
 vm_pageclean_setup(
-       vm_page_t               m,
-       vm_page_t               new_m,
-       vm_object_t             new_object,
-       vm_object_offset_t      new_offset)
+       vm_page_t               m,
+       vm_page_t               new_m,
+       vm_object_t             new_object,
+       vm_object_offset_t      new_offset)
 {
-       assert(!m->busy);
+       assert(!m->vmp_busy);
 #if 0
-       assert(!m->cleaning);
+       assert(!m->vmp_cleaning);
 #endif
 
-       XPR(XPR_VM_PAGEOUT,
-    "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
-               (integer_t)m->object, m->offset, (integer_t)m, 
-               (integer_t)new_m, new_offset);
-
-       pmap_clear_modify(m->phys_page);
+       pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
 
        /*
         * Mark original page as cleaning in place.
         */
-       m->cleaning = TRUE;
-       m->dirty = TRUE;
-       m->precious = FALSE;
+       m->vmp_cleaning = TRUE;
+       SET_PAGE_DIRTY(m, FALSE);
+       m->vmp_precious = FALSE;
 
        /*
         * Convert the fictitious page to a private shadow of
         * the real page.
         */
-       assert(new_m->fictitious);
-       assert(new_m->phys_page == vm_page_fictitious_addr);
-       new_m->fictitious = FALSE;
-       new_m->private = TRUE;
-       new_m->pageout = TRUE;
-       new_m->phys_page = m->phys_page;
-       vm_page_wire(new_m);
-
-       vm_page_insert(new_m, new_object, new_offset);
-       assert(!new_m->wanted);
-       new_m->busy = FALSE;
+       assert(new_m->vmp_fictitious);
+       assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
+       new_m->vmp_fictitious = FALSE;
+       new_m->vmp_private = TRUE;
+       new_m->vmp_free_when_done = TRUE;
+       VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));
+
+       vm_page_lockspin_queues();
+       vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
+       vm_page_unlock_queues();
+
+       vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
+       assert(!new_m->vmp_wanted);
+       new_m->vmp_busy = FALSE;
 }
 
 /*
@@ -661,43 +589,44 @@ vm_pageclean_setup(
  *     Implementation:
  *             Move this page to a completely new object.
  */
-void   
+void
 vm_pageout_initialize_page(
-       vm_page_t       m)
+       vm_page_t       m)
 {
-       vm_object_t             object;
-       vm_object_offset_t      paging_offset;
-       vm_page_t               holding_page;
-       memory_object_t         pager;
+       vm_object_t             object;
+       vm_object_offset_t      paging_offset;
+       memory_object_t         pager;
+
+       assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
 
-       XPR(XPR_VM_PAGEOUT,
-               "vm_pageout_initialize_page, page 0x%X\n",
-               (integer_t)m, 0, 0, 0, 0);
-       assert(m->busy);
+       object = VM_PAGE_OBJECT(m);
+
+       assert(m->vmp_busy);
+       assert(object->internal);
 
        /*
         *      Verify that we really want to clean this page
         */
-       assert(!m->absent);
-       assert(!m->error);
-       assert(m->dirty);
+       assert(!m->vmp_absent);
+       assert(!m->vmp_error);
+       assert(m->vmp_dirty);
 
        /*
         *      Create a paging reference to let us play with the object.
         */
-       object = m->object;
-       paging_offset = m->offset + object->paging_offset;
+       paging_offset = m->vmp_offset + object->paging_offset;
 
-       if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
-               VM_PAGE_FREE(m);
+       if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
                panic("reservation without pageout?"); /* alan */
+
+               VM_PAGE_FREE(m);
                vm_object_unlock(object);
 
                return;
        }
 
        /*
-        * If there's no pager, then we can't clean the page.  This should 
+        * If there's no pager, then we can't clean the page.  This should
         * never happen since this should be a copy object and therefore not
         * an external object, so the pager should always be there.
         */
@@ -705,23 +634,22 @@ vm_pageout_initialize_page(
        pager = object->pager;
 
        if (pager == MEMORY_OBJECT_NULL) {
-               VM_PAGE_FREE(m);
                panic("missing pager for copy object");
+
+               VM_PAGE_FREE(m);
                return;
        }
 
-       /* set the page for future call to vm_fault_list_request */
+       /*
+        * set the page for future call to vm_fault_list_request
+        */
+       pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
+       SET_PAGE_DIRTY(m, FALSE);
+
+       /*
+        * keep the object from collapsing or terminating
+        */
        vm_object_paging_begin(object);
-       holding_page = NULL;
-       vm_page_lock_queues();
-       pmap_clear_modify(m->phys_page);
-       m->dirty = TRUE;
-       m->busy = TRUE;
-       m->list_req_pending = TRUE;
-       m->cleaning = TRUE;
-       m->pageout = TRUE;
-       vm_page_wire(m);
-       vm_page_unlock_queues();
        vm_object_unlock(object);
 
        /*
@@ -738,15 +666,6 @@ vm_pageout_initialize_page(
        vm_object_paging_end(object);
 }
 
-#if    MACH_CLUSTER_STATS
-#define MAXCLUSTERPAGES        16
-struct {
-       unsigned long pages_in_cluster;
-       unsigned long pages_at_higher_offsets;
-       unsigned long pages_at_lower_offsets;
-} cluster_stats[MAXCLUSTERPAGES];
-#endif /* MACH_CLUSTER_STATS */
-
 
 /*
  * vm_pageout_cluster:
@@ -755,1592 +674,4156 @@ struct {
  * which will page it out and attempt to clean adjacent pages
  * in the same operation.
  *
- * The page must be busy, and the object and queues locked. We will take a
+ * The object and queues must be locked. We will take a
  * paging reference to prevent deallocation or collapse when we
  * release the object lock back at the call site.  The I/O thread
  * is responsible for consuming this reference
  *
  * The page must not be on any pageout queue.
  */
+#if DEVELOPMENT || DEBUG
+vmct_stats_t vmct_stats;
+
+int32_t vmct_active = 0;
+uint64_t vm_compressor_epoch_start = 0;
+uint64_t vm_compressor_epoch_stop = 0;
+
+typedef enum vmct_state_t {
+       VMCT_IDLE,
+       VMCT_AWAKENED,
+       VMCT_ACTIVE,
+} vmct_state_t;
+vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
+#endif
+
 
 void
 vm_pageout_cluster(vm_page_t m)
 {
-       vm_object_t     object = m->object;
-        struct         vm_pageout_queue *q;
-
+       vm_object_t     object = VM_PAGE_OBJECT(m);
+       struct          vm_pageout_queue *q;
 
-       XPR(XPR_VM_PAGEOUT,
-               "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
-               (integer_t)object, m->offset, (integer_t)m, 0, 0);
+       VM_PAGE_CHECK(m);
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       vm_object_lock_assert_exclusive(object);
 
        /*
         * Only a certain kind of page is appreciated here.
         */
-       assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0));
-       assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);
-       assert(!m->throttled);
+       assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
+       assert(!m->vmp_cleaning && !m->vmp_laundry);
+       assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
 
        /*
-        * protect the object from collapse - 
-        * locking in the object's paging_offset.
+        * protect the object from collapse or termination
         */
-       vm_object_paging_begin(object);
+       vm_object_activity_begin(object);
+
+       if (object->internal == TRUE) {
+               assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+               m->vmp_busy = TRUE;
+
+               q = &vm_pageout_queue_internal;
+       } else {
+               q = &vm_pageout_queue_external;
+       }
 
        /*
-        * set the page for future call to vm_fault_list_request
-        * page should already be marked busy
+        * pgo_laundry count is tied to the laundry bit
         */
-       vm_page_wire(m);
-       m->list_req_pending = TRUE;
-       m->cleaning = TRUE;
-       m->pageout = TRUE;
-        m->laundry = TRUE;
-
-       if (object->internal == TRUE)
-               q = &vm_pageout_queue_internal;
-       else
-               q = &vm_pageout_queue_external;
+       m->vmp_laundry = TRUE;
        q->pgo_laundry++;
 
-       m->pageout_queue = TRUE;
-       queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
-       
+       m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
+       vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);
+
        if (q->pgo_idle == TRUE) {
-               q->pgo_idle = FALSE;
-               thread_wakeup((event_t) &q->pgo_pending);
+               q->pgo_idle = FALSE;
+               thread_wakeup((event_t) &q->pgo_pending);
        }
+       VM_PAGE_CHECK(m);
 }
 
 
-unsigned long vm_pageout_throttle_up_count = 0;
-
 /*
- * A page is back from laundry.  See if there are some pages waiting to
+ * A page is back from laundry or we are stealing it back from
+ * the laundering state.  See if there are some pages waiting to
  * go to laundry and if we can let some of them go now.
  *
  * Object and page queues must be locked.
  */
 void
 vm_pageout_throttle_up(
-       vm_page_t       m)
+       vm_page_t       m)
 {
-        struct vm_pageout_queue *q;
+       struct vm_pageout_queue *q;
+       vm_object_t      m_object;
+
+       m_object = VM_PAGE_OBJECT(m);
+
+       assert(m_object != VM_OBJECT_NULL);
+       assert(m_object != kernel_object);
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+       vm_object_lock_assert_exclusive(m_object);
+
+       if (m_object->internal == TRUE) {
+               q = &vm_pageout_queue_internal;
+       } else {
+               q = &vm_pageout_queue_external;
+       }
+
+       if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
+               vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
+               m->vmp_q_state = VM_PAGE_NOT_ON_Q;
+
+               VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+
+               vm_object_activity_end(m_object);
+
+               VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
+       }
+       if (m->vmp_laundry == TRUE) {
+               m->vmp_laundry = FALSE;
+               q->pgo_laundry--;
+
+               if (q->pgo_throttled == TRUE) {
+                       q->pgo_throttled = FALSE;
+                       thread_wakeup((event_t) &q->pgo_laundry);
+               }
+               if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
+                       q->pgo_draining = FALSE;
+                       thread_wakeup((event_t) (&q->pgo_laundry + 1));
+               }
+               VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
+       }
+}
 
-       vm_pageout_throttle_up_count++;
 
-       assert(m->laundry);
-       assert(m->object != VM_OBJECT_NULL);
-       assert(m->object != kernel_object);
+static void
+vm_pageout_throttle_up_batch(
+       struct vm_pageout_queue *q,
+       int             batch_cnt)
+{
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 
-       if (m->object->internal == TRUE)
-               q = &vm_pageout_queue_internal;
-       else
-               q = &vm_pageout_queue_external;
+       VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);
 
-       m->laundry = FALSE;
-       q->pgo_laundry--;
+       q->pgo_laundry -= batch_cnt;
 
        if (q->pgo_throttled == TRUE) {
-               q->pgo_throttled = FALSE;
-               thread_wakeup((event_t) &q->pgo_laundry);
+               q->pgo_throttled = FALSE;
+               thread_wakeup((event_t) &q->pgo_laundry);
+       }
+       if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
+               q->pgo_draining = FALSE;
+               thread_wakeup((event_t) (&q->pgo_laundry + 1));
        }
 }
 
 
+
 /*
- *     vm_pageout_scan does the dirty work for the pageout daemon.
- *     It returns with vm_page_queue_free_lock held and
- *     vm_page_free_wanted == 0.
+ * VM memory pressure monitoring.
+ *
+ * vm_pageout_scan() keeps track of the number of pages it considers and
+ * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
+ *
+ * compute_memory_pressure() is called every second from compute_averages()
+ * and moves "vm_pageout_stat_now" forward, to start accumulating the number
+ * of reclaimed pages in a new vm_pageout_stat[] bucket.
+ *
+ * mach_vm_pressure_monitor() collects past statistics about memory pressure.
+ * The caller provides the number of seconds ("nsecs") worth of statistics
+ * it wants, up to 30 seconds.
+ * It computes the number of pages reclaimed in the past "nsecs" seconds and
+ * also returns the number of pages the system still needs to reclaim at this
+ * moment in time.
  */
+#if DEVELOPMENT || DEBUG
+#define VM_PAGEOUT_STAT_SIZE    (30 * 8) + 1
+#else
+#define VM_PAGEOUT_STAT_SIZE    (1 * 8) + 1
+#endif
+struct vm_pageout_stat {
+       unsigned long vm_page_active_count;
+       unsigned long vm_page_speculative_count;
+       unsigned long vm_page_inactive_count;
+       unsigned long vm_page_anonymous_count;
 
-#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT  (3 * MAX_UPL_TRANSFER)
-
-#define        FCS_IDLE                0
-#define FCS_DELAYED            1
-#define FCS_DEADLOCK_DETECTED  2
-
-struct flow_control {
-        int            state;
-        mach_timespec_t        ts;
-};
+       unsigned long vm_page_free_count;
+       unsigned long vm_page_wire_count;
+       unsigned long vm_page_compressor_count;
 
-void
-vm_pageout_scan(void)
-{
-       unsigned int loop_count = 0;
-       unsigned int inactive_burst_count = 0;
-       unsigned int active_burst_count = 0;
-       unsigned int reactivated_this_call;
-       unsigned int reactivate_limit;
-       vm_page_t   local_freeq = NULL;
-       int         local_freed = 0;
-       int         delayed_unlock;
-       int         need_internal_inactive = 0;
-       int         refmod_state = 0;
-        int    vm_pageout_deadlock_target = 0;
-       struct  vm_pageout_queue *iq;
-       struct  vm_pageout_queue *eq;
-        struct vm_speculative_age_q *sq;
-       struct  flow_control    flow_control;
-        boolean_t inactive_throttled = FALSE;
-       boolean_t try_failed;
-       mach_timespec_t         ts;
-       unsigned int msecs = 0;
-       vm_object_t     object;
-       vm_object_t     last_object_tried;
-       int     zf_ratio;
-       int     zf_run_count;
-       uint32_t        catch_up_count = 0;
-       uint32_t        inactive_reclaim_run;
-       boolean_t       forced_reclaim;
+       unsigned long vm_page_pages_compressed;
+       unsigned long vm_page_pageable_internal_count;
+       unsigned long vm_page_pageable_external_count;
+       unsigned long vm_page_xpmapped_external_count;
 
-       flow_control.state = FCS_IDLE;
-       iq = &vm_pageout_queue_internal;
-       eq = &vm_pageout_queue_external;
-       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+       unsigned int pages_grabbed;
+       unsigned int pages_freed;
 
+       unsigned int pages_compressed;
+       unsigned int pages_grabbed_by_compressor;
+       unsigned int failed_compressions;
 
-        XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
+       unsigned int pages_evicted;
+       unsigned int pages_purged;
 
-        
-       vm_page_lock_queues();
-       delayed_unlock = 1;     /* must be nonzero if Qs are locked, 0 if unlocked */
+       unsigned int considered;
+       unsigned int considered_bq_internal;
+       unsigned int considered_bq_external;
 
-       /*
-        *      Calculate the max number of referenced pages on the inactive
-        *      queue that we will reactivate.
-        */
-       reactivated_this_call = 0;
-       reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
-                                                   vm_page_inactive_count);
-       inactive_reclaim_run = 0;
+       unsigned int skipped_external;
+       unsigned int filecache_min_reactivations;
 
+       unsigned int freed_speculative;
+       unsigned int freed_cleaned;
+       unsigned int freed_internal;
+       unsigned int freed_external;
 
-/*???*/        /*
-        *      We want to gradually dribble pages from the active queue
-        *      to the inactive queue.  If we let the inactive queue get
-        *      very small, and then suddenly dump many pages into it,
-        *      those pages won't get a sufficient chance to be referenced
-        *      before we start taking them from the inactive queue.
-        *
-        *      We must limit the rate at which we send pages to the pagers.
-        *      data_write messages consume memory, for message buffers and
-        *      for map-copy objects.  If we get too far ahead of the pagers,
-        *      we can potentially run out of memory.
-        *
-        *      We can use the laundry count to limit directly the number
-        *      of pages outstanding to the default pager.  A similar
-        *      strategy for external pagers doesn't work, because
-        *      external pagers don't have to deallocate the pages sent them,
-        *      and because we might have to send pages to external pagers
-        *      even if they aren't processing writes.  So we also
-        *      use a burst count to limit writes to external pagers.
-        *
-        *      When memory is very tight, we can't rely on external pagers to
-        *      clean pages.  They probably aren't running, because they
-        *      aren't vm-privileged.  If we kept sending dirty pages to them,
-        *      we could exhaust the free list.
-        */
+       unsigned int cleaned_dirty_external;
+       unsigned int cleaned_dirty_internal;
 
+       unsigned int inactive_referenced;
+       unsigned int inactive_nolock;
+       unsigned int reactivation_limit_exceeded;
+       unsigned int forced_inactive_reclaim;
 
-Restart:
-       assert(delayed_unlock!=0);
-       
-       /*
-        *      A page is "zero-filled" if it was not paged in from somewhere,
-        *      and it belongs to an object at least VM_ZF_OBJECT_SIZE_THRESHOLD big.
-        *      Recalculate the zero-filled page ratio.  We use this to apportion
-        *      victimized pages between the normal and zero-filled inactive
-        *      queues according to their relative abundance in memory.  Thus if a task
-        *      is flooding memory with zf pages, we begin to hunt them down.
-        *      It would be better to throttle greedy tasks at a higher level,
-        *      but at the moment mach vm cannot do this.
-        */
-       {
-               uint32_t  total  = vm_page_active_count + vm_page_inactive_count;
-               uint32_t  normal = total - vm_zf_count;
-               
-               /* zf_ratio is the number of zf pages we victimize per normal page */
-               
-               if (vm_zf_count < vm_accellerate_zf_pageout_trigger)
-                       zf_ratio = 0;
-               else if ((vm_zf_count <= normal) || (normal == 0))
-                       zf_ratio = 1;
-               else 
-                       zf_ratio = vm_zf_count / normal;
-                       
-               zf_run_count = 0;
-       }
-        
-       /*
-        *      Recalculate vm_page_inactivate_target.
-        */
-       vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
-                                                         vm_page_inactive_count +
-                                                         vm_page_speculative_count);
-       /*
-        * don't want to wake the pageout_scan thread up everytime we fall below
-        * the targets... set a low water mark at 0.25% below the target
-        */
-       vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
-
-       vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
-                                                               vm_page_inactive_count);
-       object = NULL;
-       last_object_tried = NULL;
-       try_failed = FALSE;
-       
-       if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
-               catch_up_count = vm_page_inactive_count + vm_page_speculative_count;
-       else
-               catch_up_count = 0;
-                   
-       for (;;) {
-               vm_page_t m;
+       unsigned int throttled_internal_q;
+       unsigned int throttled_external_q;
 
-               DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
+       unsigned int phantom_ghosts_found;
+       unsigned int phantom_ghosts_added;
+} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };
 
-               if (delayed_unlock == 0) {
-                       vm_page_lock_queues();
-                       delayed_unlock = 1;
-               }
+unsigned int vm_pageout_stat_now = 0;
 
-               /*
-                *      Don't sweep through active queue more than the throttle
-                *      which should be kept relatively low
-                */
-               active_burst_count = MIN(vm_pageout_burst_active_throttle, vm_page_active_count);
+#define VM_PAGEOUT_STAT_BEFORE(i) \
+       (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
+#define VM_PAGEOUT_STAT_AFTER(i) \
+       (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
 
-               /*
-                *      Move pages from active to inactive.
-                */
-               if (need_internal_inactive == 0 && (vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
-                       goto done_moving_active_pages;
+#if VM_PAGE_BUCKETS_CHECK
+int vm_page_buckets_check_interval = 80; /* in eighths of a second */
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
-               while (!queue_empty(&vm_page_queue_active) &&
-                      (need_internal_inactive || active_burst_count)) {
 
-                       if (active_burst_count)
-                              active_burst_count--;
+void
+record_memory_pressure(void);
+void
+record_memory_pressure(void)
+{
+       unsigned int vm_pageout_next;
 
-                       vm_pageout_active++;
+#if VM_PAGE_BUCKETS_CHECK
+       /* check the consistency of VM page buckets at regular interval */
+       static int counter = 0;
+       if ((++counter % vm_page_buckets_check_interval) == 0) {
+               vm_page_buckets_check();
+       }
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
-                       m = (vm_page_t) queue_first(&vm_page_queue_active);
+       vm_pageout_state.vm_memory_pressure =
+           vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
+           vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
+           vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
+           vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;
 
-                       assert(m->active && !m->inactive);
-                       assert(!m->laundry);
-                       assert(m->object != kernel_object);
-                       assert(m->phys_page != vm_page_guard_addr);
+       commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure );
 
-                       DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+       /* move "now" forward */
+       vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
 
-                       /*
-                        * Try to lock object; since we've already got the
-                        * page queues lock, we can only 'try' for this one.
-                        * if the 'try' fails, we need to do a mutex_pause
-                        * to allow the owner of the object lock a chance to
-                        * run... otherwise, we're likely to trip over this
-                        * object in the same state as we work our way through
-                        * the queue... clumps of pages associated with the same
-                        * object are fairly typical on the inactive and active queues
-                        */
-                       if (m->object != object) {
-                               if (object != NULL) {
-                                       vm_object_unlock(object);
-                                       object = NULL;
-                                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
-                               }
-                               if (!vm_object_lock_try_scan(m->object)) {
-                                       /*
-                                        * move page to end of active queue and continue
-                                        */
-                                       queue_remove(&vm_page_queue_active, m,
-                                                    vm_page_t, pageq);
-                                       queue_enter(&vm_page_queue_active, m,
-                                                   vm_page_t, pageq);
-
-                                       try_failed = TRUE;
-                                       
-                                       m = (vm_page_t) queue_first(&vm_page_queue_active);
-                                       /*
-                                        * this is the next object we're going to be interested in
-                                        * try to make sure its available after the mutex_yield
-                                        * returns control
-                                        */
-                                       vm_pageout_scan_wants_object = m->object;
+       bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));
 
-                                       goto done_with_activepage;
-                               }
-                               object = m->object;
+       vm_pageout_stat_now = vm_pageout_next;
+}
 
-                               try_failed = FALSE;
-                       }
 
-                       /*
-                        * if the page is BUSY, then we pull it
-                        * off the active queue and leave it alone.
-                        * when BUSY is cleared, it will get stuck
-                        * back on the appropriate queue
-                        */
-                       if (m->busy) {
-                               queue_remove(&vm_page_queue_active, m,
-                                            vm_page_t, pageq);
-                               m->pageq.next = NULL;
-                               m->pageq.prev = NULL;
+/*
+ * IMPORTANT
+ * mach_vm_ctl_page_free_wanted() is called indirectly, via
+ * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
+ * it must be safe in the restricted stackshot context. Locks and/or
+ * blocking are not allowable.
+ */
+unsigned int
+mach_vm_ctl_page_free_wanted(void)
+{
+       unsigned int page_free_target, page_free_count, page_free_wanted;
 
-                               if (!m->fictitious)
-                                       vm_page_active_count--;
-                               m->active = FALSE;
+       page_free_target = vm_page_free_target;
+       page_free_count = vm_page_free_count;
+       if (page_free_target > page_free_count) {
+               page_free_wanted = page_free_target - page_free_count;
+       } else {
+               page_free_wanted = 0;
+       }
 
-                               goto done_with_activepage;
-                       }
+       return page_free_wanted;
+}
 
-                       /*
-                        *      Deactivate the page while holding the object
-                        *      locked, so we know the page is still not busy.
-                        *      This should prevent races between pmap_enter
-                        *      and pmap_clear_reference.  The page might be
-                        *      absent or fictitious, but vm_page_deactivate
-                        *      can handle that.
-                        */
-                       vm_page_deactivate(m);
 
-                       if (need_internal_inactive) {
-                               vm_pageout_scan_active_throttle_success++;
-                               need_internal_inactive--;
-                       }
-done_with_activepage:
-                       if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
+/*
+ * IMPORTANT:
+ * mach_vm_pressure_monitor() is called when taking a stackshot, with
+ * wait_for_pressure FALSE, so that code path must remain safe in the
+ * restricted stackshot context. No blocking or locks are allowable
+ * on that code path.
+ */
 
-                               if (object != NULL) {
-                                       vm_object_unlock(object);
-                                       object = NULL;
-                                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
-                               }
-                               if (local_freeq) {
-                                       vm_page_free_list(local_freeq);
-                                       
-                                       local_freeq = NULL;
-                                       local_freed = 0;
-                               }
-                               mutex_yield(&vm_page_queue_lock);
+kern_return_t
+mach_vm_pressure_monitor(
+       boolean_t       wait_for_pressure,
+       unsigned int    nsecs_monitored,
+       unsigned int    *pages_reclaimed_p,
+       unsigned int    *pages_wanted_p)
+{
+       wait_result_t   wr;
+       unsigned int    vm_pageout_then, vm_pageout_now;
+       unsigned int    pages_reclaimed;
+       unsigned int    units_of_monitor;
 
-                               delayed_unlock = 1;
+       units_of_monitor = 8 * nsecs_monitored;
+       /*
+        * We don't take the vm_page_queue_lock here because we don't want
+        * mach_vm_pressure_monitor() to get in the way of the vm_pageout_scan()
+        * thread when it's trying to reclaim memory.  We don't need fully
+        * accurate monitoring anyway...
+        */
 
+       if (wait_for_pressure) {
+               /* wait until there's memory pressure */
+               while (vm_page_free_count >= vm_page_free_target) {
+                       wr = assert_wait((event_t) &vm_page_free_wanted,
+                           THREAD_INTERRUPTIBLE);
+                       if (wr == THREAD_WAITING) {
+                               wr = thread_block(THREAD_CONTINUE_NULL);
+                       }
+                       if (wr == THREAD_INTERRUPTED) {
+                               return KERN_ABORTED;
+                       }
+                       if (wr == THREAD_AWAKENED) {
                                /*
-                                * continue the while loop processing
-                                * the active queue... need to hold
-                                * the page queues lock
+                                * The memory pressure might have already
+                                * been relieved but let's not block again
+                                * and let's report that there was memory
+                                * pressure at some point.
                                 */
+                               break;
                        }
                }
+       }
 
+       /* provide the number of pages the system wants to reclaim */
+       if (pages_wanted_p != NULL) {
+               *pages_wanted_p = mach_vm_ctl_page_free_wanted();
+       }
 
+       if (pages_reclaimed_p == NULL) {
+               return KERN_SUCCESS;
+       }
 
-               /**********************************************************************
-                * above this point we're playing with the active queue
-                * below this point we're playing with the throttling mechanisms
-                * and the inactive queue
-                **********************************************************************/
+       /* provide number of pages reclaimed in the last "nsecs_monitored" */
+       vm_pageout_now = vm_pageout_stat_now;
+       pages_reclaimed = 0;
+       for (vm_pageout_then =
+           VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
+           vm_pageout_then != vm_pageout_now &&
+           units_of_monitor-- != 0;
+           vm_pageout_then =
+           VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
+               pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
+               pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
+               pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
+               pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
+       }
+       *pages_reclaimed_p = pages_reclaimed;
 
-done_moving_active_pages:
+       return KERN_SUCCESS;
+}
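/*
 * A minimal usage sketch: how an in-kernel caller might poll the interface
 * above.  Only the mach_vm_pressure_monitor() signature is taken from the
 * code; the watcher loop, its parameters and the printf() policy below are
 * hypothetical.
 */
static void
vm_pressure_watch_loop(void)
{
        unsigned int reclaimed, wanted;

        for (;;) {
                /* block until there is pressure, then sample the recent window */
                if (mach_vm_pressure_monitor(TRUE, 1,
                    &reclaimed, &wanted) != KERN_SUCCESS) {
                        break;  /* THREAD_INTERRUPTED surfaces as KERN_ABORTED */
                }
                printf("vm pressure: %u pages wanted, %u recently reclaimed\n",
                    wanted, reclaimed);
        }
}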
 
-               /*
-                *      We are done if we have met our target *and*
-                *      nobody is still waiting for a page.
-                */
-               if (vm_page_free_count + local_freed >= vm_page_free_target) {
-                       if (object != NULL) {
-                               vm_object_unlock(object);
-                               object = NULL;
-                       }
-                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
-                       if (local_freeq) {
-                               vm_page_free_list(local_freeq);
-                                       
-                               local_freeq = NULL;
-                               local_freed = 0;
-                       }
-                       /*
-                        * inactive target still not met... keep going
-                        * until we get the queues balanced
-                        */
 
-                       /*
-                        *      Recalculate vm_page_inactivate_target.
-                        */
-                       vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
-                                                                         vm_page_inactive_count +
-                                                                         vm_page_speculative_count);
-
-#ifndef        CONFIG_EMBEDDED
-                       /*
-                        * XXX: if no active pages can be reclaimed, pageout scan can be stuck trying 
-                        *      to balance the queues
-                        */
-                       if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
-                           !queue_empty(&vm_page_queue_active))
-                               continue;
-#endif
+#if DEVELOPMENT || DEBUG
 
-                       mutex_lock(&vm_page_queue_free_lock);
+static void
+vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);
 
-                       if ((vm_page_free_count >= vm_page_free_target) &&
-                           (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+/*
+ * guard flag used to make sure there is
+ * only a single sweep going on at a time
+ */
+boolean_t       vm_pageout_disconnect_all_pages_active = FALSE;
 
-                               vm_page_unlock_queues();
 
-                               thread_wakeup((event_t) &vm_pageout_garbage_collect);
+void
+vm_pageout_disconnect_all_pages()
+{
+       vm_page_lock_queues();
 
-                               assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+       if (vm_pageout_disconnect_all_pages_active == TRUE) {
+               vm_page_unlock_queues();
+               return;
+       }
+       vm_pageout_disconnect_all_pages_active = TRUE;
+       vm_page_unlock_queues();
 
-                               return;
-                       }
-                       mutex_unlock(&vm_page_queue_free_lock);
-               }
-               /*
-                * Before anything, we check if we have any ripe volatile objects around.
-                * If so, purge the first and see what it gives us.
-                */
-               assert (available_for_purge>=0);
-               if (available_for_purge)
-               {
-                       if (object != NULL) {
-                               vm_object_unlock(object);
-                               object = NULL;
-                       }
-                       vm_purgeable_object_purge_one();
-                       continue;
-               }
-        
-               if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
-                       /*
-                        * try to pull pages from the aging bins
-                        * see vm_page.h for an explanation of how
-                        * this mechanism works
-                        */
-                       struct vm_speculative_age_q     *aq;
-                       mach_timespec_t ts_fully_aged;
-                       boolean_t       can_steal = FALSE;
-                      
-                       aq = &vm_page_queue_speculative[speculative_steal_index];
+       vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
+       vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
+       vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);
 
-                       while (queue_empty(&aq->age_q)) {
+       vm_pageout_disconnect_all_pages_active = FALSE;
+}
 
-                               speculative_steal_index++;
 
-                               if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
-                                       speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
-                               
-                               aq = &vm_page_queue_speculative[speculative_steal_index];
-                       }
-                       if (vm_page_speculative_count > vm_page_speculative_target)
-                               can_steal = TRUE;
-                       else {
-                               ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) / 1000;
-                               ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) % 1000)
-                                                     * 1000 * NSEC_PER_USEC;
+void
+vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
+{
+       vm_page_t       m;
+       vm_object_t     t_object = NULL;
+       vm_object_t     l_object = NULL;
+       vm_object_t     m_object = NULL;
+       int             delayed_unlock = 0;
+       int             try_failed_count = 0;
+       int             disconnected_count = 0;
+       int             paused_count = 0;
+       int             object_locked_count = 0;
+
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
+           q, qcount, 0, 0, 0);
 
-                               ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
+       vm_page_lock_queues();
 
-                               clock_get_system_nanotime(&ts.tv_sec, (unsigned *)&ts.tv_nsec);
+       while (qcount && !vm_page_queue_empty(q)) {
+               LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 
-                               if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
-                                       can_steal = TRUE;
-                       }
-                       if (can_steal == TRUE)
-                               vm_page_speculate_ageit(aq);
-               }
+               m = (vm_page_t) vm_page_queue_first(q);
+               m_object = VM_PAGE_OBJECT(m);
 
                /*
-                * Sometimes we have to pause:
-                *      1) No inactive pages - nothing to do.
-                *      2) Flow control - default pageout queue is full
-                *      3) Loop control - no acceptable pages found on the inactive queue
-                *         within the last vm_pageout_burst_inactive_throttle iterations
+                * check to see if we currently are working
+                * with the same object... if so, we've
+                * already got the lock
                 */
-               if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf) && queue_empty(&sq->age_q) &&
-                   (VM_PAGE_Q_THROTTLED(iq) || queue_empty(&vm_page_queue_throttled))) {
-                       vm_pageout_scan_empty_throttle++;
-                       msecs = vm_pageout_empty_wait;
-                       goto vm_pageout_scan_delay;
-
-               } else if (inactive_burst_count >=
-                          MIN(vm_pageout_burst_inactive_throttle,
-                              (vm_page_inactive_count +
-                               vm_page_speculative_count))) {
-                       vm_pageout_scan_burst_throttle++;
-                       msecs = vm_pageout_burst_wait;
-                       goto vm_pageout_scan_delay;
-
-               } else if (VM_PAGE_Q_THROTTLED(iq) && IP_VALID(memory_manager_default)) {
-
-                       switch (flow_control.state) {
-
-                       case FCS_IDLE:
-reset_deadlock_timer:
-                               ts.tv_sec = vm_pageout_deadlock_wait / 1000;
-                               ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
-                               clock_get_system_nanotime(&flow_control.ts.tv_sec,
-                                                         (unsigned *)&flow_control.ts.tv_nsec);
-                               ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
-                               
-                               flow_control.state = FCS_DELAYED;
-                               msecs = vm_pageout_deadlock_wait;
-
-                               break;
-                                       
-                       case FCS_DELAYED:
-                               clock_get_system_nanotime(&ts.tv_sec,
-                                                         (unsigned *)&ts.tv_nsec);
-
-                               if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
-                                       /*
-                                        * the pageout thread for the default pager is potentially
-                                        * deadlocked since the 
-                                        * default pager queue has been throttled for more than the
-                                        * allowable time... we need to move some clean pages or dirty
-                                        * pages belonging to the external pagers if they aren't throttled
-                                        * vm_page_free_wanted represents the number of threads currently
-                                        * blocked waiting for pages... we'll move one page for each of
-                                        * these plus a fixed amount to break the logjam... once we're done
-                                        * moving this number of pages, we'll re-enter the FSC_DELAYED state
-                                        * with a new timeout target since we have no way of knowing 
-                                        * whether we've broken the deadlock except through observation
-                                        * of the queue associated with the default pager... we need to
-                                        * stop moving pages and allow the system to run to see what
-                                        * state it settles into.
-                                        */
-                                       vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
-                                       vm_pageout_scan_deadlock_detected++;
-                                       flow_control.state = FCS_DEADLOCK_DETECTED;
+               if (m_object != l_object) {
+                       /*
+                        * the object associated with candidate page is
+                        * different from the one we were just working
+                        * with... dump the lock if we still own it
+                        */
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
+                       }
+                       if (m_object != t_object) {
+                               try_failed_count = 0;
+                       }
 
-                                       thread_wakeup((event_t) &vm_pageout_garbage_collect);
-                                       goto consider_inactive;
+                       /*
+                        * Try to lock object; since we've already got the
+                        * page queues lock, we can only 'try' for this one.
+                        * if the 'try' fails, we need to do a mutex_pause
+                        * to allow the owner of the object lock a chance to
+                        * run...
+                        */
+                       if (!vm_object_lock_try_scan(m_object)) {
+                               if (try_failed_count > 20) {
+                                       goto reenter_pg_on_q;
                                }
-                               /*
-                                * just resniff instead of trying
-                                * to compute a new delay time... we're going to be
-                                * awakened immediately upon a laundry completion,
-                                * so we won't wait any longer than necessary
-                                */
-                               msecs = vm_pageout_idle_wait;
-                               break;
-
-                       case FCS_DEADLOCK_DETECTED:
-                               if (vm_pageout_deadlock_target)
-                                       goto consider_inactive;
-                               goto reset_deadlock_timer;
+                               vm_page_unlock_queues();
+                               mutex_pause(try_failed_count++);
+                               vm_page_lock_queues();
+                               delayed_unlock = 0;
 
-                       }
-                       vm_pageout_scan_throttle++;
-                       iq->pgo_throttled = TRUE;
-vm_pageout_scan_delay:
-                       if (object != NULL) {
-                               vm_object_unlock(object);
-                               object = NULL;
-                       }
-                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+                               paused_count++;
 
-                       if (local_freeq) {
-                               vm_page_free_list(local_freeq);
-                                       
-                               local_freeq = NULL;
-                               local_freed = 0;
+                               t_object = m_object;
+                               continue;
                        }
-#if CONFIG_EMBEDDED
-                       {
-                       int percent_avail;
+                       object_locked_count++;
 
+                       l_object = m_object;
+               }
+               if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
                        /*
-                        * Decide if we need to send a memory status notification.
+                        * put it back at the tail of its queue
                         */
-                       percent_avail = 
-                               (vm_page_active_count + vm_page_inactive_count + 
-                                vm_page_speculative_count + vm_page_free_count +
-                                (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
-                               atop_64(max_mem);
-                       if (percent_avail >= (kern_memorystatus_level + 5) || 
-                           percent_avail <= (kern_memorystatus_level - 5)) {
-                               kern_memorystatus_level = percent_avail;
-                               thread_wakeup((event_t)&kern_memorystatus_wakeup);
-                       }
+                       goto reenter_pg_on_q;
+               }
+               if (m->vmp_pmapped == TRUE) {
+                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+
+                       disconnected_count++;
+               }
+reenter_pg_on_q:
+               vm_page_queue_remove(q, m, vmp_pageq);
+               vm_page_queue_enter(q, m, vmp_pageq);
+
+               qcount--;
+               try_failed_count = 0;
+
+               if (delayed_unlock++ > 128) {
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
                        }
+                       lck_mtx_yield(&vm_page_queue_lock);
+                       delayed_unlock = 0;
+               }
+       }
+       if (l_object != NULL) {
+               vm_object_unlock(l_object);
+               l_object = NULL;
+       }
+       vm_page_unlock_queues();
+
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
+           q, disconnected_count, object_locked_count, paused_count, 0);
+}
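/*
 * In short: the sweep above visits at most "qcount" pages from the head of
 * "q", pmap_disconnect()s any page that is still mapped, and rotates each
 * examined page to the tail of its queue so that a single pass terminates.
 * The closing trace event records how many pages were disconnected, how many
 * object locks were taken, and how often the try-lock had to pause.
 */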
+
 #endif
-                       assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
 
-                       counter(c_vm_pageout_scan_block++);
 
-                       vm_page_unlock_queues();
+static void
+vm_pageout_page_queue(vm_page_queue_head_t *, int);
 
-                       assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
-                               
-                       thread_block(THREAD_CONTINUE_NULL);
+/*
+ * guard flag used to make sure there is
+ * only a single sweep going on at a time
+ */
+boolean_t       vm_pageout_anonymous_pages_active = FALSE;
 
-                       vm_page_lock_queues();
-                       delayed_unlock = 1;
 
-                       iq->pgo_throttled = FALSE;
+void
+vm_pageout_anonymous_pages()
+{
+       if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+               vm_page_lock_queues();
 
-                       if (loop_count >= vm_page_inactive_count)
-                               loop_count = 0;
-                       inactive_burst_count = 0;
+               if (vm_pageout_anonymous_pages_active == TRUE) {
+                       vm_page_unlock_queues();
+                       return;
+               }
+               vm_pageout_anonymous_pages_active = TRUE;
+               vm_page_unlock_queues();
+
+               vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
+               vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
+               vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);
 
-                       goto Restart;
-                       /*NOTREACHED*/
+               if (VM_CONFIG_SWAP_IS_PRESENT) {
+                       vm_consider_swapping();
                }
 
+               vm_page_lock_queues();
+               vm_pageout_anonymous_pages_active = FALSE;
+               vm_page_unlock_queues();
+       }
+}
 
-               flow_control.state = FCS_IDLE;
-consider_inactive:
-               loop_count++;
-               inactive_burst_count++;
-               vm_pageout_inactive++;
 
-               /* Choose a victim. */
-               
-               while (1) {     
-                       m = NULL;
-                       
-                       /*
-                        * the most eligible pages are ones that were throttled because the
-                        * pager wasn't ready at the time.  If a pager is ready now,
-                        * see if one of these is useful.
-                        */
-                       if (!VM_PAGE_Q_THROTTLED(iq) && !queue_empty(&vm_page_queue_throttled)) {
-                               m = (vm_page_t) queue_first(&vm_page_queue_throttled);
-                               break;
-                       }
+void
+vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
+{
+       vm_page_t       m;
+       vm_object_t     t_object = NULL;
+       vm_object_t     l_object = NULL;
+       vm_object_t     m_object = NULL;
+       int             delayed_unlock = 0;
+       int             try_failed_count = 0;
+       int             refmod_state;
+       int             pmap_options;
+       struct          vm_pageout_queue *iq;
+       ppnum_t         phys_page;
 
-                       /*
-                        * The second most eligible pages are ones we paged in speculatively,
-                        * but which have not yet been touched.
-                        */
-                       if ( !queue_empty(&sq->age_q) ) {
-                               m = (vm_page_t) queue_first(&sq->age_q);
-                               break;
-                       }
-                       /*
-                        * Time for a zero-filled inactive page?
-                        */
-                       if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
-                            queue_empty(&vm_page_queue_inactive)) {
-                               if ( !queue_empty(&vm_page_queue_zf) ) {
-                                       m = (vm_page_t) queue_first(&vm_page_queue_zf);
-                                       zf_run_count++;
-                                       break;
-                               }
+
+       iq = &vm_pageout_queue_internal;
+
+       vm_page_lock_queues();
+
+       while (qcount && !vm_page_queue_empty(q)) {
+               LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+               if (VM_PAGE_Q_THROTTLED(iq)) {
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
                        }
-                       /*
-                        * It's either a normal inactive page or nothing.
-                        */
-                        if ( !queue_empty(&vm_page_queue_inactive) ) {
-                                m = (vm_page_t) queue_first(&vm_page_queue_inactive);
-                                zf_run_count = 0;
-                               break;
-                        }
+                       iq->pgo_draining = TRUE;
 
-                        panic("vm_pageout: no victim");
-               }
+                       assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
+                       vm_page_unlock_queues();
 
-               assert(!m->active && (m->inactive || m->speculative || m->throttled));
-               assert(!m->laundry);
-               assert(m->object != kernel_object);
-               assert(m->phys_page != vm_page_guard_addr);
+                       thread_block(THREAD_CONTINUE_NULL);
 
-               DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+                       vm_page_lock_queues();
+                       delayed_unlock = 0;
+                       continue;
+               }
+               m = (vm_page_t) vm_page_queue_first(q);
+               m_object = VM_PAGE_OBJECT(m);
 
                /*
                 * check to see if we currently are working
                 * with the same object... if so, we've
                 * already got the lock
                 */
-               if (m->object != object) {
-                       /*
-                        * the object associated with candidate page is 
+               if (m_object != l_object) {
+                       if (!m_object->internal) {
+                               goto reenter_pg_on_q;
+                       }
+
+                       /*
+                        * the object associated with candidate page is
                         * different from the one we were just working
                         * with... dump the lock if we still own it
                         */
-                       if (object != NULL) {
-                               vm_object_unlock(object);
-                               object = NULL;
-                               vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
                        }
+                       if (m_object != t_object) {
+                               try_failed_count = 0;
+                       }
+
                        /*
                         * Try to lock object; since we've already got the
                         * page queues lock, we can only 'try' for this one.
                         * if the 'try' fails, we need to do a mutex_pause
                         * to allow the owner of the object lock a chance to
-                        * run... otherwise, we're likely to trip over this
-                        * object in the same state as we work our way through
-                        * the queue... clumps of pages associated with the same
-                        * object are fairly typical on the inactive and active queues
+                        * run...
                         */
-                       if (!vm_object_lock_try_scan(m->object)) {
-                               /*
-                                *      Move page to end and continue.
-                                *      Don't re-issue ticket
-                                */
-                               if (m->zero_fill) {
-                                       queue_remove(&vm_page_queue_zf, m,
-                                                    vm_page_t, pageq);
-                                       queue_enter(&vm_page_queue_zf, m,
-                                                   vm_page_t, pageq);
-                               } else if (m->speculative) {
-                                       remque(&m->pageq);
-                                       m->speculative = FALSE;
-                                       vm_page_speculative_count--;
-                                       
-                                       /*
-                                        * move to the tail of the inactive queue
-                                        * to get it out of the way... the speculative
-                                        * queue is generally too small to depend
-                                        * on there being enough pages from other
-                                        * objects to make cycling it back on the
-                                        * same queue a winning proposition
-                                        */
-                                       queue_enter(&vm_page_queue_inactive, m,
-                                                   vm_page_t, pageq);
-                                       m->inactive = TRUE;
-                                       vm_page_inactive_count++;
-                                       token_new_pagecount++;
-                               }  else if (m->throttled) {
-                                       queue_remove(&vm_page_queue_throttled, m,
-                                                    vm_page_t, pageq);
-                                       m->throttled = FALSE;
-                                       vm_page_throttled_count--;
-                                       
-                                       /*
-                                        * not throttled any more, so can stick
-                                        * it on the inactive queue.
-                                        */
-                                       queue_enter(&vm_page_queue_inactive, m,
-                                                   vm_page_t, pageq);
-                                       m->inactive = TRUE;
-                                       vm_page_inactive_count++;
-                                       token_new_pagecount++;
-                               } else {
-                                       queue_remove(&vm_page_queue_inactive, m,
-                                                    vm_page_t, pageq);
-#if MACH_ASSERT
-                                       vm_page_inactive_count--;       /* balance for purgeable queue asserts */
-#endif
-                                       vm_purgeable_q_advance_all();
-
-                                       queue_enter(&vm_page_queue_inactive, m,
-                                                   vm_page_t, pageq);
-#if MACH_ASSERT
-                                       vm_page_inactive_count++;       /* balance for purgeable queue asserts */
-#endif
-                                       token_new_pagecount++;
-                               }
-                               pmap_clear_reference(m->phys_page);
-                               m->reference = FALSE;
-
-                               vm_pageout_inactive_nolock++;
-
-                               if ( !queue_empty(&sq->age_q) )
-                                       m = (vm_page_t) queue_first(&sq->age_q);
-                               else if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
-                                         queue_empty(&vm_page_queue_inactive)) {
-                                       if ( !queue_empty(&vm_page_queue_zf) )
-                                               m = (vm_page_t) queue_first(&vm_page_queue_zf);
-                               } else if ( !queue_empty(&vm_page_queue_inactive) ) {
-                                       m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+                       if (!vm_object_lock_try_scan(m_object)) {
+                               if (try_failed_count > 20) {
+                                       goto reenter_pg_on_q;
                                }
-                               /*
-                                * this is the next object we're going to be interested in
-                                * try to make sure its available after the mutex_yield
-                                * returns control
-                                */
-                               vm_pageout_scan_wants_object = m->object;
-
-                               /*
-                                * force us to dump any collected free pages
-                                * and to pause before moving on
-                                */
-                               try_failed = TRUE;
+                               vm_page_unlock_queues();
+                               mutex_pause(try_failed_count++);
+                               vm_page_lock_queues();
+                               delayed_unlock = 0;
 
-                               goto done_with_inactivepage;
+                               t_object = m_object;
+                               continue;
                        }
-                       object = m->object;
-                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
-
-                       try_failed = FALSE;
+                       l_object = m_object;
                }
-
-               /*
-                *      Paging out pages of external objects which
-                *      are currently being created must be avoided.
-                *      The pager may claim for memory, thus leading to a
-                *      possible dead lock between it and the pageout thread,
-                *      if such pages are finally chosen. The remaining assumption
-                *      is that there will finally be enough available pages in the
-                *      inactive pool to page out in order to satisfy all memory
-                *      claimed by the thread which concurrently creates the pager.
-                */
-               if (!object->pager_initialized && object->pager_created) {
+               if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
                        /*
-                        *      Move page to end and continue, hoping that
-                        *      there will be enough other inactive pages to
-                        *      page out so that the thread which currently
-                        *      initializes the pager will succeed.
-                        *      Don't re-grant the ticket, the page should
-                        *      pulled from the queue and paged out whenever
-                        *      one of its logically adjacent fellows is
-                        *      targeted.
-                        *
-                        *      Pages found on the speculative list can never be
-                        *      in this state... they always have a pager associated
-                        *      with them.
+                        * page is not to be cleaned
+                        * put it back at the tail of its queue
                         */
-                       assert(!m->speculative);
+                       goto reenter_pg_on_q;
+               }
+               phys_page = VM_PAGE_GET_PHYS_PAGE(m);
 
-                       if (m->zero_fill) {
-                               queue_remove(&vm_page_queue_zf, m,
-                                            vm_page_t, pageq);
-                               queue_enter(&vm_page_queue_zf, m,
-                                           vm_page_t, pageq);
-                       } else {
-                               queue_remove(&vm_page_queue_inactive, m,
-                                            vm_page_t, pageq);
-#if MACH_ASSERT
-                               vm_page_inactive_count--;       /* balance for purgeable queue asserts */
-#endif
-                               vm_purgeable_q_advance_all();
+               if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
+                       refmod_state = pmap_get_refmod(phys_page);
 
-                               queue_enter(&vm_page_queue_inactive, m,
-                                           vm_page_t, pageq);
-#if MACH_ASSERT
-                               vm_page_inactive_count++;       /* balance for purgeable queue asserts */
-#endif
-                               token_new_pagecount++;
+                       if (refmod_state & VM_MEM_REFERENCED) {
+                               m->vmp_reference = TRUE;
+                       }
+                       if (refmod_state & VM_MEM_MODIFIED) {
+                               SET_PAGE_DIRTY(m, FALSE);
                        }
-                       vm_pageout_inactive_avoid++;
-
-                       goto done_with_inactivepage;
                }
-               /*
-                *      Remove the page from its list.
-                */
-               if (m->speculative) {
-                       remque(&m->pageq);
-                       m->speculative = FALSE;
-                       vm_page_speculative_count--;
-               } else if (m->throttled) {
-                       queue_remove(&vm_page_queue_throttled, m, vm_page_t, pageq);
-                       m->throttled = FALSE;
-                       vm_page_throttled_count--;
-               } else {
-                       if (m->zero_fill) {
-                               queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
-                               vm_zf_queue_count--;
+               if (m->vmp_reference == TRUE) {
+                       m->vmp_reference = FALSE;
+                       pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+                       goto reenter_pg_on_q;
+               }
+               if (m->vmp_pmapped == TRUE) {
+                       if (m->vmp_dirty || m->vmp_precious) {
+                               pmap_options = PMAP_OPTIONS_COMPRESSOR;
                        } else {
-                               queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
+                               pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+                       }
+                       refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
+                       if (refmod_state & VM_MEM_MODIFIED) {
+                               SET_PAGE_DIRTY(m, FALSE);
                        }
-                       m->inactive = FALSE;
-                       if (!m->fictitious)
-                               vm_page_inactive_count--;
-                               vm_purgeable_q_advance_all();
                }
 
-               /* If the object is empty, the page must be reclaimed even if dirty or used. */
-               /* If the page belongs to a volatile object, we stick it back on. */
-               if (object->copy == VM_OBJECT_NULL) {
-                       if(object->purgable == VM_PURGABLE_EMPTY && !m->cleaning) {
-                               m->busy = TRUE;
-                               if (m->pmapped == TRUE) {
-                                       /* unmap the page */
-                                       refmod_state = pmap_disconnect(m->phys_page);
-                                       if (refmod_state & VM_MEM_MODIFIED) {
-                                               m->dirty = TRUE;
-                                       }
-                               }
-                               if (m->dirty || m->precious) {
-                                       /* we saved the cost of cleaning this page ! */
-                                       vm_page_purged_count++;
-                               }
-                               goto reclaim_page;
-                       }
-                       if (object->purgable == VM_PURGABLE_VOLATILE) {
-                               /* if it's wired, we can't put it on our queue */
-                               assert(m->wire_count == 0);
-                               /* just stick it back on! */
-                               goto reactivate_page;
-                       }
+               if (!m->vmp_dirty && !m->vmp_precious) {
+                       vm_page_unlock_queues();
+                       VM_PAGE_FREE(m);
+                       vm_page_lock_queues();
+                       delayed_unlock = 0;
+
+                       goto next_pg;
                }
-               m->pageq.next = NULL;
-               m->pageq.prev = NULL;
+               if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
+                       if (!m_object->pager_initialized) {
+                               vm_page_unlock_queues();
 
-               if ( !m->fictitious && catch_up_count)
-                       catch_up_count--;
+                               vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);
 
-               /*
-                * ENCRYPTED SWAP:
-                * if this page has already been picked up as part of a
-                * page-out cluster, it will be busy because it is being
-                * encrypted (see vm_object_upl_request()).  But we still
-                * want to demote it from "clean-in-place" (aka "adjacent")
-                * to "clean-and-free" (aka "target"), so let's ignore its
-                * "busy" bit here and proceed to check for "cleaning" a
-                * little bit below...
-                */
-               if ( !m->encrypted_cleaning && (m->busy || !object->alive)) {
+                               if (!m_object->pager_initialized) {
+                                       vm_object_compressor_pager_create(m_object);
+                               }
+
+                               vm_page_lock_queues();
+                               delayed_unlock = 0;
+                       }
+                       if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
+                               goto reenter_pg_on_q;
+                       }
                        /*
-                        *      Somebody is already playing with this page.
-                        *      Leave it off the pageout queues.
-                        *
+                        * vm_object_compressor_pager_create will drop the object lock
+                        * which means 'm' may no longer be valid to use
                         */
-                       vm_pageout_inactive_busy++;
-
-                       goto done_with_inactivepage;
+                       continue;
                }
-
                /*
-                *      If it's absent or in error, we can reclaim the page.
+                * we've already factored out pages in the laundry which
+                * means this page can't be on the pageout queue so it's
+                * safe to do the vm_page_queues_remove
                 */
+               vm_page_queues_remove(m, TRUE);
 
-               if (m->absent || m->error) {
-                       vm_pageout_inactive_absent++;
-reclaim_page:
-                       if (vm_pageout_deadlock_target) {
-                               vm_pageout_scan_inactive_throttle_success++;
-                               vm_pageout_deadlock_target--;
-                       }
+               LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 
-                       DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
+               vm_pageout_cluster(m);
 
-                       if (m->object->internal) {
-                               DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
-                       } else {
-                               DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
+               goto next_pg;
+
+reenter_pg_on_q:
+               vm_page_queue_remove(q, m, vmp_pageq);
+               vm_page_queue_enter(q, m, vmp_pageq);
+next_pg:
+               qcount--;
+               try_failed_count = 0;
+
+               if (delayed_unlock++ > 128) {
+                       if (l_object != NULL) {
+                               vm_object_unlock(l_object);
+                               l_object = NULL;
                        }
+                       lck_mtx_yield(&vm_page_queue_lock);
+                       delayed_unlock = 0;
+               }
+       }
+       if (l_object != NULL) {
+               vm_object_unlock(l_object);
+               l_object = NULL;
+       }
+       vm_page_unlock_queues();
+}
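/*
 * In short: for each page visited on "q", the sweep above frees the page
 * outright if it is clean, and otherwise ensures its internal object has a
 * compressor pager before handing the page to vm_pageout_cluster() to be
 * compressed; whenever the internal pageout queue is throttled, the sweep
 * blocks until that queue drains.
 */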
 
-                       vm_page_free_prepare(m);
 
-                       assert(m->pageq.next == NULL &&
-                              m->pageq.prev == NULL);
-                       m->pageq.next = (queue_entry_t)local_freeq;
-                       local_freeq = m;
-                       local_freed++;
 
-                       inactive_burst_count = 0;
+/*
+ * function in BSD to apply I/O throttle to the pageout thread
+ */
+extern void vm_pageout_io_throttle(void);
+
+#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj)                    \
+       MACRO_BEGIN                                                     \
+       /* \
+        * If a "reusable" page somehow made it back into \
+        * the active queue, it's been re-used and is not \
+        * quite re-usable. \
+        * If the VM object was "all_reusable", consider it \
+        * as "all re-used" instead of converting it to \
+        * "partially re-used", which could be expensive. \
+        */                                                             \
+       assert(VM_PAGE_OBJECT((m)) == (obj));                           \
+       if ((m)->vmp_reusable ||                                        \
+           (obj)->all_reusable) {                                      \
+               vm_object_reuse_pages((obj),                            \
+                                     (m)->vmp_offset,                  \
+                                     (m)->vmp_offset + PAGE_SIZE_64,   \
+                                     FALSE);                           \
+       }                                                               \
+       MACRO_END
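/*
 * A minimal sketch of how the macro above is meant to be invoked: the page's
 * object must already be locked and must match the page (the macro asserts
 * the latter).  The wrapper below is hypothetical, purely for illustration.
 */
static void
handle_possibly_reusable_page(vm_page_t m)
{
        vm_object_t m_object = VM_PAGE_OBJECT(m);

        /* caller holds the object lock, as in the pageout scan */
        VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
}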
+
+
+#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT         64
+#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX     1024
+
+#define FCS_IDLE                0
+#define FCS_DELAYED             1
+#define FCS_DEADLOCK_DETECTED   2
 
-                       goto done_with_inactivepage;
-               }
+struct flow_control {
+       int             state;
+       mach_timespec_t ts;
+};
 
-               assert(!m->private);
-               assert(!m->fictitious);
 
-               /*
-                *      If already cleaning this page in place, convert from
-                *      "adjacent" to "target". We can leave the page mapped,
-                *      and vm_pageout_object_terminate will determine whether
-                *      to free or reactivate.
-                */
+#if CONFIG_BACKGROUND_QUEUE
+uint64_t vm_pageout_rejected_bq_internal = 0;
+uint64_t vm_pageout_rejected_bq_external = 0;
+uint64_t vm_pageout_skipped_bq_internal = 0;
+#endif
 
-               if (m->cleaning) {
-                       m->busy = TRUE;
-                       m->pageout = TRUE;
-                       m->dump_cleaning = TRUE;
-                       vm_page_wire(m);
+#define ANONS_GRABBED_LIMIT     2
 
-                       CLUSTER_STAT(vm_pageout_cluster_conversions++);
 
-                       inactive_burst_count = 0;
+#if 0
+static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
+#endif
+static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);
 
-                       goto done_with_inactivepage;
-               }
+#define VM_PAGEOUT_PB_NO_ACTION                         0
+#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
+#define VM_PAGEOUT_PB_THREAD_YIELD                      2
 
-               /*
-                *      If it's being used, reactivate.
-                *      (Fictitious pages are either busy or absent.)
-                *      First, update the reference and dirty bits
-                *      to make sure the page is unreferenced.
-                */
-               refmod_state = -1;
 
-               if (m->reference == FALSE && m->pmapped == TRUE) {
-                       refmod_state = pmap_get_refmod(m->phys_page);
-                 
-                       if (refmod_state & VM_MEM_REFERENCED)
-                               m->reference = TRUE;
-                       if (refmod_state & VM_MEM_MODIFIED)
-                               m->dirty = TRUE;
-               }
-               if (m->reference && !m->no_cache) {
-                       /*
-                        * The page we pulled off the inactive list has
-                        * been referenced.  It is possible for other
-                        * processors to be touching pages faster than we
-                        * can clear the referenced bit and traverse the
+#if 0
+static void
+vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
+{
+       if (*local_freeq) {
+               vm_page_unlock_queues();
+
+               VM_DEBUG_CONSTANT_EVENT(
+                       vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+                       vm_page_free_count, 0, 0, 1);
+
+               vm_page_free_list(*local_freeq, TRUE);
+
+               VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+                   vm_page_free_count, *local_freed, 0, 1);
+
+               *local_freeq = NULL;
+               *local_freed = 0;
+
+               vm_page_lock_queues();
+       } else {
+               lck_mtx_yield(&vm_page_queue_lock);
+       }
+       *delayed_unlock = 1;
+}
+#endif
+
+
+static void
+vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
+    vm_page_t *local_freeq, int *local_freed, int action)
+{
+       vm_page_unlock_queues();
+
+       if (*object != NULL) {
+               vm_object_unlock(*object);
+               *object = NULL;
+       }
+       if (*local_freeq) {
+               vm_page_free_list(*local_freeq, TRUE);
+
+               *local_freeq = NULL;
+               *local_freed = 0;
+       }
+       *delayed_unlock = 1;
+
+       switch (action) {
+       case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
+               vm_consider_waking_compactor_swapper();
+               break;
+       case VM_PAGEOUT_PB_THREAD_YIELD:
+               thread_yield_internal(1);
+               break;
+       case VM_PAGEOUT_PB_NO_ACTION:
+       default:
+               break;
+       }
+       vm_page_lock_queues();
+}
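/*
 * Sketch of a hypothetical call site for the helper above: a scan loop that
 * caches an object lock in "object" and batches freed pages on "local_freeq"
 * would call
 *
 *      vm_pageout_prepare_to_block(&object, &delayed_unlock,
 *          &local_freeq, &local_freed,
 *          VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
 *
 * which drops the page-queues lock, releases the cached object lock, frees
 * the batched pages, optionally pokes the compactor/swapper or yields, and
 * returns with the page-queues lock held again.
 */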
+
+
+static struct vm_pageout_vminfo last;
+
+uint64_t last_vm_page_pages_grabbed = 0;
+
+extern  uint32_t c_segment_pages_compressed;
+
+extern uint64_t shared_region_pager_reclaimed;
+extern struct memory_object_pager_ops shared_region_pager_ops;
+
+void
+update_vm_info(void)
+{
+       unsigned long tmp;
+       uint64_t tmp64;
+
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;
+
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;
+
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
+       vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;
+
+
+       tmp = vm_pageout_vminfo.vm_pageout_considered_page;
+       vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
+       last.vm_pageout_considered_page = tmp;
+
+       tmp64 = vm_pageout_vminfo.vm_pageout_compressions;
+       vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp64 - last.vm_pageout_compressions);
+       last.vm_pageout_compressions = tmp64;
+
+       tmp = vm_pageout_vminfo.vm_compressor_failed;
+       vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
+       last.vm_compressor_failed = tmp;
+
+       tmp64 = vm_pageout_vminfo.vm_compressor_pages_grabbed;
+       vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp64 - last.vm_compressor_pages_grabbed);
+       last.vm_compressor_pages_grabbed = tmp64;
+
+       tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
+       vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
+       last.vm_phantom_cache_found_ghost = tmp;
+
+       tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
+       vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
+       last.vm_phantom_cache_added_ghost = tmp;
+
+       tmp64 = get_pages_grabbed_count();
+       vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp64 - last_vm_page_pages_grabbed);
+       last_vm_page_pages_grabbed = tmp64;
+
+       tmp = vm_pageout_vminfo.vm_page_pages_freed;
+       vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
+       last.vm_page_pages_freed = tmp;
+
+
+       if (vm_pageout_stats[vm_pageout_stat_now].considered) {
+               tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
+               vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
+               last.vm_pageout_pages_evicted = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
+               vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
+               last.vm_pageout_pages_purged = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
+               vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
+               last.vm_pageout_freed_speculative = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_freed_external;
+               vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
+               last.vm_pageout_freed_external = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
+               vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
+               last.vm_pageout_inactive_referenced = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
+               vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
+               last.vm_pageout_scan_inactive_throttled_external = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
+               vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
+               last.vm_pageout_inactive_dirty_external = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
+               vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
+               last.vm_pageout_freed_cleaned = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
+               vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
+               last.vm_pageout_inactive_nolock = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
+               vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
+               last.vm_pageout_scan_inactive_throttled_internal = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
+               vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
+               last.vm_pageout_skipped_external = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
+               vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
+               last.vm_pageout_reactivation_limit_exceeded = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
+               vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
+               last.vm_pageout_inactive_force_reclaim = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
+               vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
+               last.vm_pageout_freed_internal = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
+               vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
+               last.vm_pageout_considered_bq_internal = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
+               vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
+               last.vm_pageout_considered_bq_external = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
+               vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
+               last.vm_pageout_filecache_min_reactivated = tmp;
+
+               tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
+               vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
+               last.vm_pageout_inactive_dirty_internal = tmp;
+       }
+
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
+           0);
+
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
+           0,
+           0);
+
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
+           vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
+           0);
+
+       if (vm_pageout_stats[vm_pageout_stat_now].considered ||
+           vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
+           vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
+               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
+                   vm_pageout_stats[vm_pageout_stat_now].considered,
+                   vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
+                   vm_pageout_stats[vm_pageout_stat_now].freed_external,
+                   vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
+                   0);
+
+               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
+                   vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
+                   vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
+                   vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
+                   vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
+                   0);
+
+               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
+                   vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
+                   vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
+                   vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
+                   vm_pageout_stats[vm_pageout_stat_now].skipped_external,
+                   0);
+
+               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
+                   vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
+                   vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
+                   vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
+                   vm_pageout_stats[vm_pageout_stat_now].freed_internal,
+                   0);
+
+               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
+                   vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
+                   vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
+                   vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
+                   vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
+                   0);
+       }
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
+           vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
+           vm_pageout_stats[vm_pageout_stat_now].pages_freed,
+           vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
+           vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
+           0);
+
+       record_memory_pressure();
+}
+
+extern boolean_t hibernation_vmqueues_inspection;
+
+/*
+ * Return values for functions called by vm_pageout_scan
+ * that control its flow.
+ *
+ * PROCEED -- vm_pageout_scan will keep making forward progress.
+ * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns.
+ * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan aka continue.
+ */
+
+#define VM_PAGEOUT_SCAN_PROCEED                 (0)
+#define VM_PAGEOUT_SCAN_DONE_RETURN             (1)
+#define VM_PAGEOUT_SCAN_NEXT_ITERATION          (2)
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it moves overflow secluded pages (one-at-a-time) to the
+ * batched 'local' free Q or active Q.
+ */
+static void
+vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed)
+{
+#if CONFIG_SECLUDED_MEMORY
+       /*
+        * Deal with secluded_q overflow.
+        */
+       if (vm_page_secluded_count > vm_page_secluded_target) {
+               vm_page_t secluded_page;
+
+               /*
+                * SECLUDED_AGING_BEFORE_ACTIVE:
+                * Excess secluded pages go to the active queue and
+                * will later go to the inactive queue.
+                */
+               assert((vm_page_secluded_count_free +
+                   vm_page_secluded_count_inuse) ==
+                   vm_page_secluded_count);
+               secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
+               assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
+
+               vm_page_queues_remove(secluded_page, FALSE);
+               assert(!secluded_page->vmp_fictitious);
+               assert(!VM_PAGE_WIRED(secluded_page));
+
+               if (secluded_page->vmp_object == 0) {
+                       /* transfer to free queue */
+                       assert(secluded_page->vmp_busy);
+                       secluded_page->vmp_snext = *local_freeq;
+                       *local_freeq = secluded_page;
+                       *local_freed += 1;
+               } else {
+                       /* transfer to head of active queue */
+                       vm_page_enqueue_active(secluded_page, FALSE);
+                       secluded_page = VM_PAGE_NULL;
+               }
+       }
+#else /* CONFIG_SECLUDED_MEMORY */
+
+#pragma unused(local_freeq)
+#pragma unused(local_freed)
+
+       return;
+
+#endif /* CONFIG_SECLUDED_MEMORY */
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it initializes the loop targets for vm_pageout_scan().
+ */
+static void
+vps_init_page_targets(void)
+{
+       /*
+        * LD TODO: Other page targets should be calculated here too.
+        */
+       vm_page_anonymous_min = vm_page_inactive_target / 20;
+
+       if (vm_pageout_state.vm_page_speculative_percentage > 50) {
+               vm_pageout_state.vm_page_speculative_percentage = 50;
+       } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
+               vm_pageout_state.vm_page_speculative_percentage = 1;
+       }
+
+       vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
+           vm_page_inactive_count);
+}
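+
+/*
+ * The arithmetic above, spelled out: vm_page_anonymous_min is pinned at
+ * 1/20th (5%) of the inactive target, and vm_page_speculative_percentage is
+ * clamped to the range [1, 50] before it feeds VM_PAGE_SPECULATIVE_TARGET().
+ */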
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it purges a single VM object at a time and will either
+ * make vm_pageout_scan() restart the loop or keep moving forward.
+ */
+static int
+vps_purge_object(void)
+{
+       int             force_purge;
+
+       assert(available_for_purge >= 0);
+       force_purge = 0; /* no force-purging */
+
+#if VM_PRESSURE_EVENTS
+       vm_pressure_level_t pressure_level;
+
+       pressure_level = memorystatus_vm_pressure_level;
+
+       if (pressure_level > kVMPressureNormal) {
+               if (pressure_level >= kVMPressureCritical) {
+                       force_purge = vm_pageout_state.memorystatus_purge_on_critical;
+               } else if (pressure_level >= kVMPressureUrgent) {
+                       force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
+               } else if (pressure_level >= kVMPressureWarning) {
+                       force_purge = vm_pageout_state.memorystatus_purge_on_warning;
+               }
+       }
+#endif /* VM_PRESSURE_EVENTS */
+
+       if (available_for_purge || force_purge) {
+               memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
+
+               VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
+               if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
+                       VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
+                       VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
+                       memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
+
+                       return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+               }
+               VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
+               memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
+       }
+
+       return VM_PAGEOUT_SCAN_PROCEED;
+}
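+
+/*
+ * Note: 'force_purge' comes from the memorystatus_purge_on_{warning, urgent,
+ * critical} knobs (under VM_PRESSURE_EVENTS) and is handed to
+ * vm_purgeable_object_purge_one(); a successful purge makes the caller
+ * restart its loop via VM_PAGEOUT_SCAN_NEXT_ITERATION.
+ */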
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will try to age the next speculative Q if the oldest
+ * one is empty.
+ */
+static int
+vps_age_speculative_queue(boolean_t force_speculative_aging)
+{
+#define DELAY_SPECULATIVE_AGE   1000
+
+       /*
+        * try to pull pages from the aging bins...
+        * see vm_page.h for an explanation of how
+        * this mechanism works
+        */
+       boolean_t                       can_steal = FALSE;
+       int                             num_scanned_queues;
+       static int                      delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop. */
+       mach_timespec_t                 ts;
+       struct vm_speculative_age_q     *aq;
+       struct vm_speculative_age_q     *sq;
+
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+       aq = &vm_page_queue_speculative[speculative_steal_index];
+
+       num_scanned_queues = 0;
+       while (vm_page_queue_empty(&aq->age_q) &&
+           num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
+               speculative_steal_index++;
+
+               if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
+                       speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+               }
+
+               aq = &vm_page_queue_speculative[speculative_steal_index];
+       }
+
+       if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
+               /*
+                * XXX We've scanned all the speculative
+                * queues but still haven't found one
+                * that is not empty, even though
+                * vm_page_speculative_count is not 0.
+                */
+               if (!vm_page_queue_empty(&sq->age_q)) {
+                       return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+               }
+#if DEVELOPMENT || DEBUG
+               panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
+#endif
+               /* readjust... */
+               vm_page_speculative_count = 0;
+               /* ... and continue */
+               return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+       }
+
+       if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
+               can_steal = TRUE;
+       } else {
+               if (!delay_speculative_age) {
+                       mach_timespec_t ts_fully_aged;
+
+                       ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
+                       ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
+                           * 1000 * NSEC_PER_USEC;
+
+                       ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
+
+                       clock_sec_t sec;
+                       clock_nsec_t nsec;
+                       clock_get_system_nanotime(&sec, &nsec);
+                       ts.tv_sec = (unsigned int) sec;
+                       ts.tv_nsec = nsec;
+
+                       if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
+                               can_steal = TRUE;
+                       } else {
+                               delay_speculative_age++;
+                       }
+               } else {
+                       delay_speculative_age++;
+                       if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
+                               delay_speculative_age = 0;
+                       }
+               }
+       }
+       if (can_steal == TRUE) {
+               vm_page_speculate_ageit(aq);
+       }
+
+       return VM_PAGEOUT_SCAN_PROCEED;
+}
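+
+/*
+ * Aging arithmetic, with illustrative numbers (not necessarily the tuned
+ * defaults): if vm_page_speculative_q_age_ms were 500 and
+ * VM_PAGE_MAX_SPECULATIVE_AGE_Q were 10, ts_fully_aged would land 5.000s
+ * past the bin's age_ts, i.e. a speculative bin only becomes stealable about
+ * 5 seconds after it was stamped, unless we are over the speculative target
+ * or force_speculative_aging is TRUE.
+ */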
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it evicts a single VM object from the cache.
+ */
+static inline int
+vps_object_cache_evict(vm_object_t *object_to_unlock)
+{
+       static int                      cache_evict_throttle = 0;
+       struct vm_speculative_age_q     *sq;
+
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+       if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
+               int     pages_evicted;
+
+               if (*object_to_unlock != NULL) {
+                       vm_object_unlock(*object_to_unlock);
+                       *object_to_unlock = NULL;
+               }
+               KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+               pages_evicted = vm_object_cache_evict(100, 10);
+
+               KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);
+
+               if (pages_evicted) {
+                       vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;
+
+                       VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
+                           vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
+                       memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
+
+                       /*
+                        * we just freed up to 100 pages,
+                        * so go back to the top of the main loop
+                        * and re-evaluate the memory situation
+                        */
+                       return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+               } else {
+                       cache_evict_throttle = 1000;
+               }
+       }
+       if (cache_evict_throttle) {
+               cache_evict_throttle--;
+       }
+
+       return VM_PAGEOUT_SCAN_PROCEED;
+}
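+
+/*
+ * Note on the throttle above: a pass that evicts nothing arms
+ * cache_evict_throttle at 1000, so vm_object_cache_evict() will not be
+ * retried until this function has been entered another 1000 times (and the
+ * aged speculative queue is empty again).
+ */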
+
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it calculates the filecache minimum that needs to be maintained
+ * as we start to steal pages.
+ */
+static void
+vps_calculate_filecache_min(void)
+{
+       int divisor = vm_pageout_state.vm_page_filecache_min_divisor;
+
+#if CONFIG_JETSAM
+       /*
+        * don't let the filecache_min fall below 15% of available memory
+        * on systems with an active compressor that isn't nearing its
+        * limits w/r to accepting new data
+        *
+        * on systems w/o the compressor/swapper, the filecache is always
+        * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
+        * since most (if not all) of the anonymous pages are in the
+        * throttled queue (which isn't counted as available) which
+        * effectively disables this filter
+        */
+       if (vm_compressor_low_on_space() || divisor == 0) {
+               vm_pageout_state.vm_page_filecache_min = 0;
+       } else {
+               vm_pageout_state.vm_page_filecache_min =
+                   ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
+       }
+#else
+       if (vm_compressor_out_of_space() || divisor == 0) {
+               vm_pageout_state.vm_page_filecache_min = 0;
+       } else {
+               /*
+                * don't let the filecache_min fall below the specified critical level
+                */
+               vm_pageout_state.vm_page_filecache_min =
+                   ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
+       }
+#endif
+       if (vm_page_free_count < (vm_page_free_reserved / 4)) {
+               vm_pageout_state.vm_page_filecache_min = 0;
+       }
+}
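+
+/*
+ * Worked example for the formula above (the divisor shown is hypothetical;
+ * the real value is vm_pageout_state.vm_page_filecache_min_divisor): with a
+ * divisor of 66, the floor is (AVAILABLE_NON_COMPRESSED_MEMORY * 10) / 66,
+ * i.e. roughly 15% of available non-compressed memory.  The floor collapses
+ * to 0 when the compressor is low on (or out of) space, when the divisor is
+ * 0, or when vm_page_free_count drops below vm_page_free_reserved / 4.
+ */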
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it updates the flow control time to detect if VM pageout scan
+ * isn't making progress.
+ */
+static void
+vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
+{
+       mach_timespec_t ts;
+       clock_sec_t sec;
+       clock_nsec_t nsec;
+
+       ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
+       ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
+       clock_get_system_nanotime(&sec, &nsec);
+       flow_control->ts.tv_sec = (unsigned int) sec;
+       flow_control->ts.tv_nsec = nsec;
+       ADD_MACH_TIMESPEC(&flow_control->ts, &ts);
+
+       flow_control->state = FCS_DELAYED;
+
+       vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
+}
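+
+/*
+ * Example of the ms -> mach_timespec split above: a vm_pageout_deadlock_wait
+ * of 300 (ms) yields ts.tv_sec = 0 and ts.tv_nsec = 300 * 1000 * NSEC_PER_USEC
+ * (300,000,000 ns); that delta is added to the current system time to form
+ * the deadline checked in the FCS_DELAYED case of vps_flow_control().
+ */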
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it is the flow control logic of VM pageout scan which
+ * controls whether it should block and for how long.
+ * Any blocking of vm_pageout_scan happens ONLY in this function.
+ */
+static int
+vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
+    vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
+{
+       boolean_t       exceeded_burst_throttle = FALSE;
+       unsigned int    msecs = 0;
+       uint32_t        inactive_external_count;
+       mach_timespec_t ts;
+       struct  vm_pageout_queue *iq;
+       struct  vm_pageout_queue *eq;
+       struct  vm_speculative_age_q *sq;
+
+       iq = &vm_pageout_queue_internal;
+       eq = &vm_pageout_queue_external;
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+       /*
+        * Sometimes we have to pause:
+        *      1) No inactive pages - nothing to do.
+        *      2) Loop control - no acceptable pages found on the inactive queue
+        *         within the last vm_pageout_burst_inactive_throttle iterations
+        *      3) Flow control - default pageout queue is full
+        */
+       if (vm_page_queue_empty(&vm_page_queue_inactive) &&
+           vm_page_queue_empty(&vm_page_queue_anonymous) &&
+           vm_page_queue_empty(&vm_page_queue_cleaned) &&
+           vm_page_queue_empty(&sq->age_q)) {
+               VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
+               msecs = vm_pageout_state.vm_pageout_empty_wait;
+       } else if (inactive_burst_count >=
+           MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
+           (vm_page_inactive_count +
+           vm_page_speculative_count))) {
+               VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
+               msecs = vm_pageout_state.vm_pageout_burst_wait;
+
+               exceeded_burst_throttle = TRUE;
+       } else if (VM_PAGE_Q_THROTTLED(iq) &&
+           VM_DYNAMIC_PAGING_ENABLED()) {
+               clock_sec_t sec;
+               clock_nsec_t nsec;
+
+               switch (flow_control->state) {
+               case FCS_IDLE:
+                       if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
+                           vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
+                               /*
+                                * since the compressor is running independently of vm_pageout_scan
+                                * let's not wait for it just yet... as long as we have a healthy supply
+                                * of filecache pages to work with, let's keep stealing those.
+                                */
+                               inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
+
+                               if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
+                                   (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
+                                       *anons_grabbed = ANONS_GRABBED_LIMIT;
+                                       VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
+                                       return VM_PAGEOUT_SCAN_PROCEED;
+                               }
+                       }
+
+                       vps_flow_control_reset_deadlock_timer(flow_control);
+                       msecs = vm_pageout_state.vm_pageout_deadlock_wait;
+
+                       break;
+
+               case FCS_DELAYED:
+                       clock_get_system_nanotime(&sec, &nsec);
+                       ts.tv_sec = (unsigned int) sec;
+                       ts.tv_nsec = nsec;
+
+                       if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
+                               /*
+                                * the pageout thread for the default pager is potentially
+                                * deadlocked since the
+                                * default pager queue has been throttled for more than the
+                                * allowable time... we need to move some clean pages or dirty
+                                * pages belonging to the external pagers if they aren't throttled
+                                * vm_page_free_wanted represents the number of threads currently
+                                * blocked waiting for pages... we'll move one page for each of
+                                * these plus a fixed amount to break the logjam... once we're done
+                                * moving this number of pages, we'll re-enter the FCS_DELAYED state
+                                * with a new timeout target since we have no way of knowing
+                                * whether we've broken the deadlock except through observation
+                                * of the queue associated with the default pager... we need to
+                                * stop moving pages and allow the system to run to see what
+                                * state it settles into.
+                                */
+
+                               *vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
+                                   vm_page_free_wanted + vm_page_free_wanted_privileged;
+                               VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
+                               flow_control->state = FCS_DEADLOCK_DETECTED;
+                               thread_wakeup((event_t) &vm_pageout_garbage_collect);
+                               return VM_PAGEOUT_SCAN_PROCEED;
+                       }
+                       /*
+                        * just resniff instead of trying
+                        * to compute a new delay time... we're going to be
+                        * awakened immediately upon a laundry completion,
+                        * so we won't wait any longer than necessary
+                        */
+                       msecs = vm_pageout_state.vm_pageout_idle_wait;
+                       break;
+
+               case FCS_DEADLOCK_DETECTED:
+                       if (*vm_pageout_deadlock_target) {
+                               return VM_PAGEOUT_SCAN_PROCEED;
+                       }
+
+                       vps_flow_control_reset_deadlock_timer(flow_control);
+                       msecs = vm_pageout_state.vm_pageout_deadlock_wait;
+
+                       break;
+               }
+       } else {
+               /*
+                * No need to pause...
+                */
+               return VM_PAGEOUT_SCAN_PROCEED;
+       }
+
+       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+       vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
+           VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+
+       if (vm_page_free_count >= vm_page_free_target) {
+               /*
+                * we're here because someone else freed up some pages
+                * while we had the queues unlocked above, and we've hit
+                * one of the 3 conditions that cause us to pause the
+                * pageout scan thread
+                *
+                * since we already have enough free pages,
+                * let's avoid stalling and return normally
+                *
+                * before we return, make sure the pageout I/O threads
+                * are running throttled in case there are still requests
+                * in the laundry... since we have enough free pages
+                * we don't need the laundry to be cleaned in a timely
+                * fashion... so let's avoid interfering with foreground
+                * activity
+                *
+                * we don't want to hold vm_page_queue_free_lock when
+                * calling vm_pageout_adjust_eq_iothrottle (since it
+                * may cause other locks to be taken), we do the initial
+                * check outside of the lock.  Once we take the lock,
+                * we recheck the condition since it may have changed.
+                * if it has, no problem, we will make the threads
+                * non-throttled before actually blocking
+                */
+               vm_pageout_adjust_eq_iothrottle(eq, TRUE);
+       }
+       lck_mtx_lock(&vm_page_queue_free_lock);
+
+       if (vm_page_free_count >= vm_page_free_target &&
+           (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+               return VM_PAGEOUT_SCAN_DONE_RETURN;
+       }
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+       if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
+               /*
+                * we're most likely about to block due to one of
+                * the 3 conditions that cause vm_pageout_scan to
+                * not be able to make forward progress w/r
+                * to providing new pages to the free queue,
+                * so unthrottle the I/O threads in case we
+                * have laundry to be cleaned... it needs
+                * to be completed ASAP.
+                *
+                * even if we don't block, we want the io threads
+                * running unthrottled since the sum of free +
+                * clean pages is still under our free target
+                */
+               vm_pageout_adjust_eq_iothrottle(eq, FALSE);
+       }
+       if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
+               /*
+                * if we get here we're below our free target and
+                * we're stalling due to a full laundry queue or
+                * we don't have any inactive pages other than
+                * those in the clean queue...
+                * however, we have pages on the clean queue that
+                * can be moved to the free queue, so let's not
+                * stall the pageout scan
+                */
+               flow_control->state = FCS_IDLE;
+               return VM_PAGEOUT_SCAN_PROCEED;
+       }
+       if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
+               flow_control->state = FCS_IDLE;
+               return VM_PAGEOUT_SCAN_PROCEED;
+       }
+
+       VM_CHECK_MEMORYSTATUS;
+
+       if (flow_control->state != FCS_IDLE) {
+               VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
+       }
+
+       iq->pgo_throttled = TRUE;
+       assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
+
+       counter(c_vm_pageout_scan_block++);
+
+       vm_page_unlock_queues();
+
+       assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+       VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
+           iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+       memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
+
+       thread_block(THREAD_CONTINUE_NULL);
+
+       VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
+           iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+       memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
+
+       vm_page_lock_queues();
+
+       iq->pgo_throttled = FALSE;
+
+       vps_init_page_targets();
+
+       return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will find and return the most appropriate page to be
+ * reclaimed.
+ */
+static int
+vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
+    boolean_t *is_page_from_bg_q, unsigned int *reactivated_this_call)
+{
+       vm_page_t                       m = NULL;
+       vm_object_t                     m_object = VM_OBJECT_NULL;
+       uint32_t                        inactive_external_count;
+       struct vm_speculative_age_q     *sq;
+       struct vm_pageout_queue         *iq;
+       int                             retval = VM_PAGEOUT_SCAN_PROCEED;
+
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+       iq = &vm_pageout_queue_internal;
+
+       *is_page_from_bg_q = FALSE;
+
+       m = NULL;
+       m_object = VM_OBJECT_NULL;
+
+       if (VM_DYNAMIC_PAGING_ENABLED()) {
+               assert(vm_page_throttled_count == 0);
+               assert(vm_page_queue_empty(&vm_page_queue_throttled));
+       }
+
+       /*
+        * Try for a clean-queue inactive page.
+        * These are pages that vm_pageout_scan tried to steal earlier, but
+        * were dirty and had to be cleaned.  Pick them up now that they are clean.
+        */
+       if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+               m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+
+               assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+               goto found_page;
+       }
+
+       /*
+        * The next most eligible pages are ones we paged in speculatively,
+        * but which have not yet been touched and have been aged out.
+        */
+       if (!vm_page_queue_empty(&sq->age_q)) {
+               m = (vm_page_t) vm_page_queue_first(&sq->age_q);
+
+               assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+               if (!m->vmp_dirty || force_anonymous == FALSE) {
+                       goto found_page;
+               } else {
+                       m = NULL;
+               }
+       }
+
+#if CONFIG_BACKGROUND_QUEUE
+       if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
+               vm_object_t     bg_m_object = NULL;
+
+               m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
+
+               bg_m_object = VM_PAGE_OBJECT(m);
+
+               if (!VM_PAGE_PAGEABLE(m)) {
+                       /*
+                        * This page is on the background queue
+                        * but not on a pageable queue.  This is
+                        * likely a transient state and whoever
+                        * took it out of its pageable queue
+                        * will likely put it back on a pageable
+                        * queue soon but we can't deal with it
+                        * at this point, so let's ignore this
+                        * page.
+                        */
+               } else if (force_anonymous == FALSE || bg_m_object->internal) {
+                       if (bg_m_object->internal &&
+                           (VM_PAGE_Q_THROTTLED(iq) ||
+                           vm_compressor_out_of_space() == TRUE ||
+                           vm_page_free_count < (vm_page_free_reserved / 4))) {
+                               vm_pageout_skipped_bq_internal++;
+                       } else {
+                               *is_page_from_bg_q = TRUE;
+
+                               if (bg_m_object->internal) {
+                                       vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
+                               } else {
+                                       vm_pageout_vminfo.vm_pageout_considered_bq_external++;
+                               }
+                               goto found_page;
+                       }
+               }
+       }
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+       inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
+
+       if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
+           (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
+               *grab_anonymous = TRUE;
+               *anons_grabbed = 0;
+
+               vm_pageout_vminfo.vm_pageout_skipped_external++;
+               goto want_anonymous;
+       }
+       *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
+
+#if CONFIG_JETSAM
+       /* If the file-backed pool has accumulated
+        * significantly more pages than the jetsam
+        * threshold, prefer to reclaim those
+        * inline to minimise compute overhead of reclaiming
+        * anonymous pages.
+        * This calculation does not account for the CPU local
+        * external page queues, as those are expected to be
+        * much smaller relative to the global pools.
+        */
+
+       struct vm_pageout_queue *eq = &vm_pageout_queue_external;
+
+       if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
+               if (vm_page_pageable_external_count >
+                   vm_pageout_state.vm_page_filecache_min) {
+                       if ((vm_page_pageable_external_count *
+                           vm_pageout_memorystatus_fb_factor_dr) >
+                           (memorystatus_available_pages_critical *
+                           vm_pageout_memorystatus_fb_factor_nr)) {
+                               *grab_anonymous = FALSE;
+
+                               VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
+                       }
+               }
+               if (*grab_anonymous) {
+                       VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
+               }
+       }
+#endif /* CONFIG_JETSAM */
+
+want_anonymous:
+       if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
+               if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
+                       m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+
+                       assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+                       *anons_grabbed = 0;
+
+                       if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
+                               if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
+                                       if ((++(*reactivated_this_call) % 100)) {
+                                               vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;
+
+                                               vm_page_activate(m);
+                                               VM_STAT_INCR(reactivations);
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+                                               if (*is_page_from_bg_q == TRUE) {
+                                                       if (m_object->internal) {
+                                                               vm_pageout_rejected_bq_internal++;
+                                                       } else {
+                                                               vm_pageout_rejected_bq_external++;
+                                                       }
+                                               }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+                                               vm_pageout_state.vm_pageout_inactive_used++;
+
+                                               m = NULL;
+                                               retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;
+
+                                               goto found_page;
+                                       }
+
+                                       /*
+                                        * steal 1 of the file backed pages even if
+                                        * we are under the limit that has been set
+                                        * for a healthy filecache
+                                        */
+                               }
+                       }
+                       goto found_page;
+               }
+       }
+       if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
+               m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+
+               assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+               *anons_grabbed += 1;
+
+               goto found_page;
+       }
+
+       m = NULL;
+
+found_page:
+       *victim_page = m;
+
+       return retval;
+}
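+
+/*
+ * Victim selection order implemented above, in brief:
+ *   1) head of the cleaned queue,
+ *   2) head of the aged speculative queue (skipped if the page is dirty and
+ *      force_anonymous is set),
+ *   3) head of the background queue, when that queue is over its target,
+ *   4) head of the file-backed inactive queue, unless we have decided to
+ *      grab anonymous pages and have not yet hit ANONS_GRABBED_LIMIT,
+ *   5) otherwise the head of the anonymous queue.
+ */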
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will put a page back on the active/inactive queue
+ * if we can't reclaim it for some reason.
+ */
+static void
+vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
+{
+       if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
+               vm_page_enqueue_inactive(m, FALSE);
+       } else {
+               vm_page_activate(m);
+       }
+
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+       vm_object_t m_object = VM_PAGE_OBJECT(m);
+
+       if (page_from_bg_q == TRUE) {
+               if (m_object->internal) {
+                       vm_pageout_rejected_bq_internal++;
+               } else {
+                       vm_pageout_rejected_bq_external++;
+               }
+       }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will try to grab the victim page's VM object (m_object)
+ * which differs from the previous victim page's object (object).
+ */
+static int
+vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
+{
+       struct vm_speculative_age_q *sq;
+
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+       /*
+        * the object associated with the candidate page is
+        * different from the one we were just working
+        * with... dump the lock if we still own it
+        */
+       if (*object != NULL) {
+               vm_object_unlock(*object);
+               *object = NULL;
+       }
+       /*
+        * Try to lock the object; since we've already got the
+        * page queues lock, we can only 'try' for this one.
+        * if the 'try' fails, we need to do a mutex_pause
+        * to allow the owner of the object lock a chance to
+        * run... otherwise, we're likely to trip over this
+        * object in the same state as we work our way through
+        * the queue... clumps of pages associated with the same
+        * object are fairly typical on the inactive and active queues
+        */
+       if (!vm_object_lock_try_scan(m_object)) {
+               vm_page_t m_want = NULL;
+
+               vm_pageout_vminfo.vm_pageout_inactive_nolock++;
+
+               if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                       VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
+               }
+
+               pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
+
+               m->vmp_reference = FALSE;
+
+               if (!m_object->object_is_shared_cache) {
+                       /*
+                        * don't apply this optimization if this is the shared cache
+                        * object, it's too easy to get rid of very hot and important
+                        * pages...
+                        * m->vmp_object must be stable since we hold the page queues lock...
+                        * we can update the scan_collisions field sans the object lock
+                        * since it is a separate field and this is the only spot that does
+                        * a read-modify-write operation and it is never executed concurrently...
+                        * we can asynchronously set this field to 0 when creating a UPL, so it
+                        * is possible for the value to be a bit non-deterministic, but that's ok
+                        * since it's only used as a hint
+                        */
+                       m_object->scan_collisions = 1;
+               }
+               if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+                       m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+               } else if (!vm_page_queue_empty(&sq->age_q)) {
+                       m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
+               } else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
+                   !vm_page_queue_empty(&vm_page_queue_inactive)) {
+                       m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+               } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
+                       m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+               }
+
+               /*
+                * this is the next object we're going to be interested in...
+                * try to make sure it's available after the mutex_pause
+                * returns control
+                */
+               if (m_want) {
+                       vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
+               }
+
+               vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
+
+               return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+       } else {
+               *object = m_object;
+               vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+       }
+
+       return VM_PAGEOUT_SCAN_PROCEED;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it notices that pageout scan may be rendered ineffective
+ * due to a FS deadlock and will jetsam a process if possible.
+ * If jetsam isn't supported, it'll move the page to the active
+ * queue to try and get some different pages pushed onwards so
+ * we can try to get out of this scenario.
+ */
+static void
+vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
+    int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
+{
+       struct  vm_pageout_queue *eq;
+       vm_object_t cur_object = VM_OBJECT_NULL;
+
+       cur_object = *object;
+
+       eq = &vm_pageout_queue_external;
+
+       if (cur_object->internal == FALSE) {
+               /*
+                * we need to break up the following potential deadlock case...
+                *  a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
+                *  b) The thread doing the writing is waiting for pages while holding the truncate lock
+                *  c) Most of the pages in the inactive queue belong to this file.
+                *
+                * we are potentially in this deadlock because...
+                *  a) the external pageout queue is throttled
+                *  b) we're done with the active queue and moved on to the inactive queue
+                *  c) we've got a dirty external page
+                *
+                * since we don't know the reason for the external pageout queue being throttled we
+                * must suspect that we are deadlocked, so move the current page onto the active queue
+                * in an effort to cause a page from the active queue to 'age' to the inactive queue
+                *
+                * if we don't have jetsam configured (i.e. we have a dynamic pager), set
+                * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
+                * pool the next time we select a victim page... if we can make enough new free pages,
+                * the deadlock will break, the external pageout queue will empty and it will no longer
+                * be throttled
+                *
+                * if we have jetsam configured, keep a count of the pages reactivated this way so
+                * that we can try to find clean pages in the active/inactive queues before
+                * deciding to jetsam a process
+                */
+               vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
+
+               vm_page_check_pageable_safe(m);
+               assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
+               vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
+               m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
+               vm_page_active_count++;
+               vm_page_pageable_external_count++;
+
+               vm_pageout_adjust_eq_iothrottle(eq, FALSE);
+
+#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
+
+#pragma unused(force_anonymous)
+
+               *vm_pageout_inactive_external_forced_reactivate_limit -= 1;
+
+               if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
+                       *vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+                       /*
+                        * Possible deadlock scenario so request jetsam action
+                        */
+
+                       assert(cur_object);
+                       vm_object_unlock(cur_object);
+
+                       cur_object = VM_OBJECT_NULL;
+
+                       /*
+                        * VM pageout scan needs to know we have dropped this lock and so set the
+                        * object variable we got passed in to NULL.
+                        */
+                       *object = VM_OBJECT_NULL;
+
+                       vm_page_unlock_queues();
+
+                       VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
+                           vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+                       /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
+                       if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
+                               VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
+                       }
+
+                       VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
+                           vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+                       vm_page_lock_queues();
+                       *delayed_unlock = 1;
+               }
+#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+
+#pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
+#pragma unused(delayed_unlock)
+
+               *force_anonymous = TRUE;
+#endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+       } else {
+               vm_page_activate(m);
+               VM_STAT_INCR(reactivations);
+
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+               if (is_page_from_bg_q == TRUE) {
+                       if (cur_object->internal) {
+                               vm_pageout_rejected_bq_internal++;
+                       } else {
+                               vm_pageout_rejected_bq_external++;
+                       }
+               }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+               vm_pageout_state.vm_pageout_inactive_used++;
+       }
+}
+
+
+void
+vm_page_balance_inactive(int max_to_move)
+{
+       vm_page_t m;
+
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+       if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
+               /*
+                * It is likely that the hibernation code path is
+                * dealing with these very queues as we are about
+                * to move pages around in/from them and completely
+                * change the linkage of the pages.
+                *
+                * And so we skip the rebalancing of these queues.
+                */
+               return;
+       }
+       vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+           vm_page_inactive_count +
+           vm_page_speculative_count);
+
+       while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
+               VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
+
+               m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+
+               assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
+               assert(!m->vmp_laundry);
+               assert(VM_PAGE_OBJECT(m) != kernel_object);
+               assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+               DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+               /*
+                * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+                *
+                * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+                * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+                * new reference happens. If no further references happen on the page after that remote TLB flushes
+                * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+                * by pageout_scan, which is just fine since the last reference would have happened quite far
+                * in the past (TLB caches don't hang around for very long), and of course could just as easily
+                * have happened before we moved the page
+                */
+               if (m->vmp_pmapped == TRUE) {
+                       pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+               }
+
+               /*
+                * The page might be absent or busy,
+                * but vm_page_deactivate can handle that.
+                * FALSE indicates that we don't want the H/W reference bit cleared
+                */
+               vm_page_deactivate_internal(m, FALSE);
+       }
+}
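+
+/*
+ * Usage sketch with a hypothetical caller (not from the xnu sources):
+ * vm_page_balance_inactive() asserts that vm_page_queue_lock is held, so a
+ * caller that wants to trickle a few pages from the active queue onto the
+ * inactive queue wraps the call in the page-queues lock.
+ */
+#if 0   /* illustrative sketch only */
+static void
+example_balance_inactive_caller(void)
+{
+        vm_page_lock_queues();
+        vm_page_balance_inactive(16);   /* hypothetical batch size */
+        vm_page_unlock_queues();
+}
+#endif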
+
+
+/*
+ *     vm_pageout_scan does the dirty work for the pageout daemon.
+ *     It returns with both vm_page_queue_free_lock and vm_page_queue_lock
+ *     held and vm_page_free_wanted == 0.
+ */
+void
+vm_pageout_scan(void)
+{
+       unsigned int loop_count = 0;
+       unsigned int inactive_burst_count = 0;
+       unsigned int reactivated_this_call;
+       unsigned int reactivate_limit;
+       vm_page_t   local_freeq = NULL;
+       int         local_freed = 0;
+       int         delayed_unlock;
+       int         delayed_unlock_limit = 0;
+       int         refmod_state = 0;
+       int     vm_pageout_deadlock_target = 0;
+       struct  vm_pageout_queue *iq;
+       struct  vm_pageout_queue *eq;
+       struct  vm_speculative_age_q *sq;
+       struct  flow_control    flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
+       boolean_t inactive_throttled = FALSE;
+       vm_object_t     object = NULL;
+       uint32_t        inactive_reclaim_run;
+       boolean_t       grab_anonymous = FALSE;
+       boolean_t       force_anonymous = FALSE;
+       boolean_t       force_speculative_aging = FALSE;
+       int             anons_grabbed = 0;
+       int             page_prev_q_state = 0;
+       boolean_t       page_from_bg_q = FALSE;
+       uint32_t        vm_pageout_inactive_external_forced_reactivate_limit = 0;
+       vm_object_t     m_object = VM_OBJECT_NULL;
+       int             retval = 0;
+       boolean_t       lock_yield_check = FALSE;
+
+
+       VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
+           vm_pageout_vminfo.vm_pageout_freed_speculative,
+           vm_pageout_state.vm_pageout_inactive_clean,
+           vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
+           vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
+
+       flow_control.state = FCS_IDLE;
+       iq = &vm_pageout_queue_internal;
+       eq = &vm_pageout_queue_external;
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+       /* Ask the pmap layer to return any pages it no longer needs. */
+       uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();
+
+       vm_page_lock_queues();
+
+       vm_page_wire_count -= pmap_wired_pages_freed;
+
+       delayed_unlock = 1;
+
+       /*
+        *      Calculate the max number of referenced pages on the inactive
+        *      queue that we will reactivate.
+        */
+       reactivated_this_call = 0;
+       reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
+           vm_page_inactive_count);
+       inactive_reclaim_run = 0;
+
+       vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+
+       /*
+        *      We must limit the rate at which we send pages to the pagers
+        *      so that we don't tie up too many pages in the I/O queues.
+        *      We implement a throttling mechanism using the laundry count
+        *      to limit the number of pages outstanding to the default
+        *      and external pagers.  We can bypass the throttles and look
+        *      for clean pages if the pageout queues don't drain in a timely
+        *      fashion since this may indicate that the pageout paths are
+        *      stalled waiting for memory, which only we can provide.
+        */
+
+       vps_init_page_targets();
+       assert(object == NULL);
+       assert(delayed_unlock != 0);
+
+       for (;;) {
+               vm_page_t m;
+
+               DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
+
+               if (lock_yield_check) {
+                       lock_yield_check = FALSE;
+
+                       if (delayed_unlock++ > delayed_unlock_limit) {
+                               int freed = local_freed;
+
+                               vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+                                   VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+                               if (freed == 0) {
+                                       lck_mtx_yield(&vm_page_queue_lock);
+                               }
+                       } else if (vm_pageout_scan_wants_object) {
+                               vm_page_unlock_queues();
+                               mutex_pause(0);
+                               vm_page_lock_queues();
+                       }
+               }
+
+               if (vm_upl_wait_for_pages < 0) {
+                       vm_upl_wait_for_pages = 0;
+               }
+
+               delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
+
+               if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
+                       delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
+               }
+
+               vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
+
+               assert(delayed_unlock);
+
+               /*
+                * maintain our balance
+                */
+               vm_page_balance_inactive(1);
+
+
+               /**********************************************************************
+               * above this point we're playing with the active and secluded queues
+               * below this point we're playing with the throttling mechanisms
+               * and the inactive queue
+               **********************************************************************/
+
+               if (vm_page_free_count + local_freed >= vm_page_free_target) {
+                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+                       vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+                           VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+                       /*
+                        * make sure the pageout I/O threads are running
+                        * throttled in case there are still requests
+                        * in the laundry... since we have met our targets
+                        * we don't need the laundry to be cleaned in a timely
+                        * fashion... so let's avoid interfering with foreground
+                        * activity
+                        */
+                       vm_pageout_adjust_eq_iothrottle(eq, TRUE);
+
+                       lck_mtx_lock(&vm_page_queue_free_lock);
+
+                       if ((vm_page_free_count >= vm_page_free_target) &&
+                           (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+                               /*
+                                * done - we have met our target *and*
+                                * there is no one waiting for a page.
+                                */
+return_from_scan:
+                               assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+                               VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
+                                   vm_pageout_state.vm_pageout_inactive,
+                                   vm_pageout_state.vm_pageout_inactive_used, 0, 0);
+                               VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
+                                   vm_pageout_vminfo.vm_pageout_freed_speculative,
+                                   vm_pageout_state.vm_pageout_inactive_clean,
+                                   vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
+                                   vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
+
+                               return;
+                       }
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+               }
+
+               /*
+                * Before anything, we check if we have any ripe volatile
+                * objects around. If so, try to purge the first object.
+                * If the purge fails, fall through to reclaim a page instead.
+                * If the purge succeeds, go back to the top and reevaluate
+                * the new memory situation.
+                */
+               retval = vps_purge_object();
+
+               if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+                       /*
+                        * Success
+                        */
+                       if (object != NULL) {
+                               vm_object_unlock(object);
+                               object = NULL;
+                       }
+
+                       lock_yield_check = FALSE;
+                       continue;
+               }
+
+               /*
+                * If our 'aged' queue is empty and we have some speculative pages
+                * in the other queues, let's go through and see if we need to age
+                * them.
+                *
+                * If we succeed in aging a speculative queue, or everything
+                * looks normal w.r.t. queue age and queue counts, we keep going onward.
+                *
+                * If, for some reason, we seem to have a mismatch between the spec.
+                * page count and the page queues, we reset those variables and
+                * restart the loop (LD TODO: Track this better?).
+                */
+               if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
+                       retval = vps_age_speculative_queue(force_speculative_aging);
+
+                       if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+                               lock_yield_check = FALSE;
+                               continue;
+                       }
+               }
+               force_speculative_aging = FALSE;
+
+               /*
+                * Check to see if we need to evict objects from the cache.
+                *
+                * Note: 'object' here doesn't have anything to do with
+                * the eviction part. We just need to make sure we have dropped
+                * any object lock we might be holding if we need to go down
+                * into the eviction logic.
+                */
+               retval = vps_object_cache_evict(&object);
+
+               if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+                       lock_yield_check = FALSE;
+                       continue;
+               }
+
+
+               /*
+                * Calculate our filecache_min that will affect the loop
+                * going forward.
+                */
+               vps_calculate_filecache_min();
+
+               /*
+                * LD TODO: Use a structure to hold all state variables for a single
+                * vm_pageout_scan iteration and pass that structure to this function instead.
+                */
+               retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
+                   &delayed_unlock, &local_freeq, &local_freed,
+                   &vm_pageout_deadlock_target, inactive_burst_count);
+
+               if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+                       if (loop_count >= vm_page_inactive_count) {
+                               loop_count = 0;
+                       }
+
+                       inactive_burst_count = 0;
+
+                       assert(object == NULL);
+                       assert(delayed_unlock != 0);
+
+                       lock_yield_check = FALSE;
+                       continue;
+               } else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
+                       goto return_from_scan;
+               }
+
+               flow_control.state = FCS_IDLE;
+
+               vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
+                   vm_pageout_inactive_external_forced_reactivate_limit);
+               loop_count++;
+               inactive_burst_count++;
+               vm_pageout_state.vm_pageout_inactive++;
+
+               /*
+                * Choose a victim.
+                */
+
+               m = NULL;
+               retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, &reactivated_this_call);
+
+               if (m == NULL) {
+                       if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+                               inactive_burst_count = 0;
+
+                               if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                                       VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
+                               }
+
+                               lock_yield_check = TRUE;
+                               continue;
+                       }
+
+                       /*
+                        * if we've gotten here, we have no victim page.
+                        * check to see if we haven't finished balancing the queues,
+                        * or we have a page on the aged speculative queue that we
+                        * skipped due to force_anonymous == TRUE, or we have
+                        * speculative pages that we can prematurely age... in any
+                        * of these cases we'll keep going, else panic
+                        */
+                       force_anonymous = FALSE;
+                       VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);
+
+                       if (!vm_page_queue_empty(&sq->age_q)) {
+                               lock_yield_check = TRUE;
+                               continue;
+                       }
+
+                       if (vm_page_speculative_count) {
+                               force_speculative_aging = TRUE;
+                               lock_yield_check = TRUE;
+                               continue;
+                       }
+                       panic("vm_pageout: no victim");
+
+                       /* NOTREACHED */
+               }
+
+               assert(VM_PAGE_PAGEABLE(m));
+               m_object = VM_PAGE_OBJECT(m);
+               force_anonymous = FALSE;
+
+               page_prev_q_state = m->vmp_q_state;
+               /*
+                * we just found this page on one of our queues...
+                * it can't also be on the pageout queue, so safe
+                * to call vm_page_queues_remove
+                */
+               vm_page_queues_remove(m, TRUE);
+
+               assert(!m->vmp_laundry);
+               assert(!m->vmp_private);
+               assert(!m->vmp_fictitious);
+               assert(m_object != kernel_object);
+               assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+               vm_pageout_vminfo.vm_pageout_considered_page++;
+
+               DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+               /*
+                * check to see if we currently are working
+                * with the same object... if so, we've
+                * already got the lock
+                */
+               if (m_object != object) {
+                       boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);
+
+                       /*
+                        * vps_switch_object() will always drop the 'object' lock first
+                        * and then try to acquire the 'm_object' lock. So 'object' has to point to
+                        * either 'm_object' or NULL.
+                        */
+                       retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);
+
+                       if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+                               lock_yield_check = TRUE;
+                               continue;
+                       }
+               }
+               assert(m_object == object);
+               assert(VM_PAGE_OBJECT(m) == m_object);
+
+               if (m->vmp_busy) {
+                       /*
+                        *      Somebody is already playing with this page.
+                        *      Put it back on the appropriate queue
+                        *
+                        */
+                       VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
+
+                       if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                               VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
+                       }
+
+                       vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
+
+                       lock_yield_check = TRUE;
+                       continue;
+               }
+
+               /*
+                *   if (m->vmp_cleaning && !m->vmp_free_when_done)
+                *      If already cleaning this page in place
+                *      just leave it off the paging queues.
+                *      We can leave the page mapped, and upl_commit_range
+                *      will put it on the clean queue.
+                *
+                *   if (m->vmp_free_when_done && !m->vmp_cleaning)
+                *      an msync INVALIDATE is in progress...
+                *      this page has been marked for destruction
+                *      after it has been cleaned,
+                *      but not yet gathered into a UPL
+                *      where 'cleaning' will be set...
+                *      just leave it off the paging queues
+                *
+                *   if (m->vmp_free_when_done && m->vmp_cleaning)
+                *      an msync INVALIDATE is in progress
+                *      and the UPL has already gathered this page...
+                *      just leave it off the paging queues
+                */
+               if (m->vmp_free_when_done || m->vmp_cleaning) {
+                       lock_yield_check = TRUE;
+                       continue;
+               }
+
+
+               /*
+                *      If it's absent, in error or the object is no longer alive,
+                *      we can reclaim the page... in the no longer alive case,
+                *      there are 2 states the page can be in that preclude us
+                *      from reclaiming it - busy or cleaning - that we've already
+                *      dealt with
+                */
+               if (m->vmp_absent || m->vmp_error || !object->alive) {
+                       if (m->vmp_absent) {
+                               VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
+                       } else if (!object->alive) {
+                               VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
+                       } else {
+                               VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
+                       }
+reclaim_page:
+                       if (vm_pageout_deadlock_target) {
+                               VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
+                               vm_pageout_deadlock_target--;
+                       }
+
+                       DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
+
+                       if (object->internal) {
+                               DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
+                       } else {
+                               DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
+                       }
+                       assert(!m->vmp_cleaning);
+                       assert(!m->vmp_laundry);
+
+                       if (!object->internal &&
+                           object->pager != NULL &&
+                           object->pager->mo_pager_ops == &shared_region_pager_ops) {
+                               shared_region_pager_reclaimed++;
+                       }
+
+                       m->vmp_busy = TRUE;
+
+                       /*
+                        * remove page from object here since we're already
+                        * behind the object lock... defer the rest of the work
+                        * we'd normally do in vm_page_free_prepare_object
+                        * until 'vm_page_free_list' is called
+                        */
+                       if (m->vmp_tabled) {
+                               vm_page_remove(m, TRUE);
+                       }
+
+                       assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
+                       m->vmp_snext = local_freeq;
+                       local_freeq = m;
+                       local_freed++;
+
+                       if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
+                               vm_pageout_vminfo.vm_pageout_freed_speculative++;
+                       } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                               vm_pageout_vminfo.vm_pageout_freed_cleaned++;
+                       } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
+                               vm_pageout_vminfo.vm_pageout_freed_internal++;
+                       } else {
+                               vm_pageout_vminfo.vm_pageout_freed_external++;
+                       }
+
+                       inactive_burst_count = 0;
+
+                       lock_yield_check = TRUE;
+                       continue;
+               }
+               if (object->copy == VM_OBJECT_NULL) {
+                       /*
+                        * No one else can have any interest in this page.
+                        * If this is an empty purgable object, the page can be
+                        * reclaimed even if dirty.
+                        * If the page belongs to a volatile purgable object, we
+                        * reactivate it if the compressor isn't active.
+                        */
+                       if (object->purgable == VM_PURGABLE_EMPTY) {
+                               if (m->vmp_pmapped == TRUE) {
+                                       /* unmap the page */
+                                       refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+                                       if (refmod_state & VM_MEM_MODIFIED) {
+                                               SET_PAGE_DIRTY(m, FALSE);
+                                       }
+                               }
+                               if (m->vmp_dirty || m->vmp_precious) {
+                                       /* we saved the cost of cleaning this page! */
+                                       vm_page_purged_count++;
+                               }
+                               goto reclaim_page;
+                       }
+
+                       if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+                               /*
+                                * With the VM compressor, the cost of
+                                * reclaiming a page is much lower (no I/O),
+                                * so if we find a "volatile" page, it's better
+                                * to let it get compressed rather than letting
+                                * it occupy a full page until it gets purged.
+                                * So no need to check for "volatile" here.
+                                */
+                       } else if (object->purgable == VM_PURGABLE_VOLATILE) {
+                               /*
+                                * Avoid cleaning a "volatile" page which might
+                                * be purged soon.
+                                */
+
+                               /* if it's wired, we can't put it on our queue */
+                               assert(!VM_PAGE_WIRED(m));
+
+                               /* just stick it back on! */
+                               reactivated_this_call++;
+
+                               if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                                       VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
+                               }
+
+                               goto reactivate_page;
+                       }
+               }
+               /*
+                *      If it's being used, reactivate.
+                *      (Fictitious pages are either busy or absent.)
+                *      First, update the reference and dirty bits
+                *      to make sure the page is unreferenced.
+                */
+               refmod_state = -1;
+
+               if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
+                       refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+
+                       if (refmod_state & VM_MEM_REFERENCED) {
+                               m->vmp_reference = TRUE;
+                       }
+                       if (refmod_state & VM_MEM_MODIFIED) {
+                               SET_PAGE_DIRTY(m, FALSE);
+                       }
+               }
+
+               if (m->vmp_reference || m->vmp_dirty) {
+                       /* deal with a rogue "reusable" page */
+                       VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
+               }
+
+               if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
+                       vm_pageout_state.vm_page_xpmapped_min = 0;
+               } else {
+                       vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
+               }
+
+               if (!m->vmp_no_cache &&
+                   page_from_bg_q == FALSE &&
+                   (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
+                   (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
+                       /*
+                        * The page we pulled off the inactive list has
+                        * been referenced.  It is possible for other
+                        * processors to be touching pages faster than we
+                        * can clear the referenced bit and traverse the
                         * inactive queue, so we limit the number of
                         * reactivations.
                         */
-                       if (++reactivated_this_call >= reactivate_limit) {
-                               vm_pageout_reactivation_limit_exceeded++;
-                       } else if (catch_up_count) {
-                               vm_pageout_catch_ups++;
-                       } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
-                               vm_pageout_inactive_force_reclaim++;
-                       } else {
-                               /*
-                                * The page was being used, so put back on active list.
+                       if (++reactivated_this_call >= reactivate_limit) {
+                               vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
+                       } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
+                               vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
+                       } else {
+                               uint32_t isinuse;
+
+                               if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                                       VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
+                               }
+
+                               vm_pageout_vminfo.vm_pageout_inactive_referenced++;
+reactivate_page:
+                               if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
+                                   vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
+                                       /*
+                                        * no explicit mappings of this object exist
+                                        * and it's not open via the filesystem
+                                        */
+                                       vm_page_deactivate(m);
+                                       VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
+                               } else {
+                                       /*
+                                        * The page was/is being used, so put back on active list.
+                                        */
+                                       vm_page_activate(m);
+                                       VM_STAT_INCR(reactivations);
+                                       inactive_burst_count = 0;
+                               }
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+                               if (page_from_bg_q == TRUE) {
+                                       if (m_object->internal) {
+                                               vm_pageout_rejected_bq_internal++;
+                                       } else {
+                                               vm_pageout_rejected_bq_external++;
+                                       }
+                               }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+                               if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                                       VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
+                               }
+                               vm_pageout_state.vm_pageout_inactive_used++;
+
+                               lock_yield_check = TRUE;
+                               continue;
+                       }
+                       /*
+                        * Make sure we call pmap_get_refmod() if it
+                        * wasn't already called just above, to update
+                        * the dirty bit.
+                        */
+                       if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
+                               refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+                               if (refmod_state & VM_MEM_MODIFIED) {
+                                       SET_PAGE_DIRTY(m, FALSE);
+                               }
+                       }
+               }
+
+               /*
+                * we've got a candidate page to steal...
+                *
+                * m->vmp_dirty is up to date courtesy of the
+                * preceding check for m->vmp_reference... if
+                * we get here, then m->vmp_reference had to be
+                * FALSE (or possibly "reactivate_limit" was
+                * exceeded), but in either case we called
+                * pmap_get_refmod() and updated both
+                * m->vmp_reference and m->vmp_dirty
+                *
+                * if it's dirty or precious we need to
+                * see if the target queue is throttled...
+                * if it is, we need to skip over it by moving it back
+                * to the end of the inactive queue
+                */
+
+               inactive_throttled = FALSE;
+
+               if (m->vmp_dirty || m->vmp_precious) {
+                       if (object->internal) {
+                               if (VM_PAGE_Q_THROTTLED(iq)) {
+                                       inactive_throttled = TRUE;
+                               }
+                       } else if (VM_PAGE_Q_THROTTLED(eq)) {
+                               inactive_throttled = TRUE;
+                       }
+               }
+throttle_inactive:
+               if (!VM_DYNAMIC_PAGING_ENABLED() &&
+                   object->internal && m->vmp_dirty &&
+                   (object->purgable == VM_PURGABLE_DENY ||
+                   object->purgable == VM_PURGABLE_NONVOLATILE ||
+                   object->purgable == VM_PURGABLE_VOLATILE)) {
+                       vm_page_check_pageable_safe(m);
+                       assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
+                       vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
+                       m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
+                       vm_page_throttled_count++;
+
+                       VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);
+
+                       inactive_burst_count = 0;
+
+                       lock_yield_check = TRUE;
+                       continue;
+               }
+               if (inactive_throttled == TRUE) {
+                       vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
+                           &delayed_unlock, &force_anonymous, page_from_bg_q);
+
+                       inactive_burst_count = 0;
+
+                       if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                               VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
+                       }
+
+                       lock_yield_check = TRUE;
+                       continue;
+               }
+
+               /*
+                * we've got a page that we can steal...
+                * eliminate all mappings and make sure
+                * we have the up-to-date modified state
+                *
+                * if we need to do a pmap_disconnect then we
+                * need to re-evaluate m->vmp_dirty since the pmap_disconnect
+                * provides the true state atomically... the
+                * page was still mapped up to the pmap_disconnect
+                * and may have been dirtied at the last microsecond
+                *
+                * Note that if 'pmapped' is FALSE then the page is not
+                * and has not been in any map, so there is no point calling
+                * pmap_disconnect().  m->vmp_dirty could have been set in anticipation
+                * of likely usage of the page.
+                */
+               if (m->vmp_pmapped == TRUE) {
+                       int pmap_options;
+
+                       /*
+                        * Don't count this page as going into the compressor
+                        * if any of these are true:
+                        * 1) compressed pager isn't enabled
+                        * 2) Freezer enabled device with compressed pager
+                        *    backend (exclusive use) i.e. most of the VM system
+                        *    (including vm_pageout_scan) has no knowledge of
+                        *    the compressor
+                        * 3) This page belongs to a file and hence will not be
+                        *    sent into the compressor
+                        */
+                       if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
+                           object->internal == FALSE) {
+                               pmap_options = 0;
+                       } else if (m->vmp_dirty || m->vmp_precious) {
+                               /*
+                                * VM knows that this page is dirty (or
+                                * precious) and needs to be compressed
+                                * rather than freed.
+                                * Tell the pmap layer to count this page
+                                * as "compressed".
+                                */
+                               pmap_options = PMAP_OPTIONS_COMPRESSOR;
+                       } else {
+                               /*
+                                * VM does not know if the page needs to
+                                * be preserved but the pmap layer might tell
+                                * us if any mapping has "modified" it.
+                                * Let the pmap layer count this page
+                                * as compressed if and only if it has been
+                                * modified.
+                                */
+                               pmap_options =
+                                   PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+                       }
+                       refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
+                           pmap_options,
+                           NULL);
+                       if (refmod_state & VM_MEM_MODIFIED) {
+                               SET_PAGE_DIRTY(m, FALSE);
+                       }
+               }
+
+               /*
+                * reset our count of pages that have been reclaimed
+                * since the last page was 'stolen'
+                */
+               inactive_reclaim_run = 0;
+
+               /*
+                *      If it's clean and not precious, we can free the page.
+                */
+               if (!m->vmp_dirty && !m->vmp_precious) {
+                       vm_pageout_state.vm_pageout_inactive_clean++;
+
+                       /*
+                        * OK, at this point we have found a page we are going to free.
+                        */
+#if CONFIG_PHANTOM_CACHE
+                       if (!object->internal) {
+                               vm_phantom_cache_add_ghost(m);
+                       }
+#endif
+                       goto reclaim_page;
+               }
+
+               /*
+                * The page may have been dirtied since the last check
+                * for a throttled target queue (which may have been skipped
+                * if the page was clean then).  With the dirty page
+                * disconnected here, we can make one final check.
+                */
+               if (object->internal) {
+                       if (VM_PAGE_Q_THROTTLED(iq)) {
+                               inactive_throttled = TRUE;
+                       }
+               } else if (VM_PAGE_Q_THROTTLED(eq)) {
+                       inactive_throttled = TRUE;
+               }
+
+               if (inactive_throttled == TRUE) {
+                       goto throttle_inactive;
+               }
+
+#if VM_PRESSURE_EVENTS
+#if CONFIG_JETSAM
+
+               /*
+                * If Jetsam is enabled, then the sending
+                * of memory pressure notifications is handled
+                * from the same thread that takes care of high-water
+                * and other jetsams i.e. the memorystatus_thread.
+                * and other jetsams, i.e. the memorystatus_thread.
+
+#else /* CONFIG_JETSAM */
+
+               vm_pressure_response();
+
+#endif /* CONFIG_JETSAM */
+#endif /* VM_PRESSURE_EVENTS */
+
+               if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
+                       VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
+               }
+
+               if (object->internal) {
+                       vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
+               } else {
+                       vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
+               }
+
+               /*
+                * internal pages will go to the compressor...
+                * external pages will go to the appropriate pager to be cleaned
+                * and upon completion will end up on 'vm_page_queue_cleaned' which
+                * is a preferred queue to steal from
+                */
+               vm_pageout_cluster(m);
+               inactive_burst_count = 0;
+
+               /*
+                * back to top of pageout scan loop
+                */
+       }
+}
+
+
+void
+vm_page_free_reserve(
+       int pages)
+{
+       int             free_after_reserve;
+
+       if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+               if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
+                       vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
+               } else {
+                       vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
+               }
+       } else {
+               if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
+                       vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
+               } else {
+                       vm_page_free_reserved += pages;
+               }
+       }
+       free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;
+
+       vm_page_free_min = vm_page_free_reserved +
+           VM_PAGE_FREE_MIN(free_after_reserve);
+
+       if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
+               vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
+       }
+
+       vm_page_free_target = vm_page_free_reserved +
+           VM_PAGE_FREE_TARGET(free_after_reserve);
+
+       if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
+               vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
+       }
+
+       if (vm_page_free_target < vm_page_free_min + 5) {
+               vm_page_free_target = vm_page_free_min + 5;
+       }
+
+       vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
+}
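+
+/*
+ * Worked example with purely hypothetical numbers (the real values come from
+ * the VM_PAGE_FREE_RESERVED/MIN/TARGET macros): if the reserve settles at 100
+ * pages, vm_page_free_min at 300 and vm_page_free_target at 2000, the target
+ * already exceeds min + 5 and is left alone, and the throttle limit becomes
+ * 2000 - (2000 / 2) = 1000 free pages, i.e. half of the final target.
+ */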
+
+/*
+ *     vm_pageout is the high level pageout daemon.
+ */
+
+void
+vm_pageout_continue(void)
+{
+       DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
+       VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);
+
+       lck_mtx_lock(&vm_page_queue_free_lock);
+       vm_pageout_running = TRUE;
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+       vm_pageout_scan();
+       /*
+        * we hold both the vm_page_queue_free_lock
+        * and the vm_page_queues_lock at this point
+        */
+       assert(vm_page_free_wanted == 0);
+       assert(vm_page_free_wanted_privileged == 0);
+       assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
+
+       vm_pageout_running = FALSE;
+#if !CONFIG_EMBEDDED
+       if (vm_pageout_waiter) {
+               vm_pageout_waiter = FALSE;
+               thread_wakeup((event_t)&vm_pageout_waiter);
+       }
+#endif /* !CONFIG_EMBEDDED */
+
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+       vm_page_unlock_queues();
+
+       counter(c_vm_pageout_block++);
+       thread_block((thread_continue_t)vm_pageout_continue);
+       /*NOTREACHED*/
+}
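+
+/*
+ * Sketch of the assert_wait()/continuation idiom used above, with a
+ * hypothetical worker thread (not from the xnu sources): the thread asserts
+ * a wait on an event and then blocks with a continuation naming the function
+ * to resume in, so no kernel stack is kept alive while it is idle.
+ */
+#if 0   /* illustrative sketch only */
+static void example_worker_continue(void);
+
+static void
+example_worker_continue(void)
+{
+        /* ... perform one pass of work here ... */
+
+        assert_wait((event_t)&vm_page_free_wanted, THREAD_UNINT);
+        thread_block((thread_continue_t)example_worker_continue);
+        /* NOTREACHED */
+}
+#endif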
+
+#if !CONFIG_EMBEDDED
+kern_return_t
+vm_pageout_wait(uint64_t deadline)
+{
+       kern_return_t kr;
+
+       lck_mtx_lock(&vm_page_queue_free_lock);
+       for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
+               vm_pageout_waiter = TRUE;
+               if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
+                           &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
+                           (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
+                       kr = KERN_OPERATION_TIMED_OUT;
+               }
+       }
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+       return kr;
+}
+#endif /* !CONFIG_EMBEDDED */
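+
+/*
+ * Usage sketch for vm_pageout_wait() with a hypothetical caller (not from
+ * the xnu sources): the argument is an absolute deadline, so a caller would
+ * typically derive it from a relative timeout first.
+ */
+#if 0   /* illustrative sketch only */
+static kern_return_t
+example_wait_for_pageout_pass(void)
+{
+        uint64_t deadline;
+
+        /* wait at most 100ms (hypothetical timeout) for the current pass */
+        clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
+
+        return vm_pageout_wait(deadline);
+}
+#endif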
+
+
+static void
+vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
+{
+       vm_page_t       m = NULL;
+       vm_object_t     object;
+       vm_object_offset_t offset;
+       memory_object_t pager;
+
+       /* On systems with a compressor, the external IO thread clears its
+        * VM privileged bit to accommodate large allocations (e.g. bulk UPL
+        * creation)
+        */
+       if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
+               current_thread()->options &= ~TH_OPT_VMPRIV;
+       }
+
+       vm_page_lockspin_queues();
+
+       while (!vm_page_queue_empty(&q->pgo_pending)) {
+               q->pgo_busy = TRUE;
+               vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
+
+               assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
+               VM_PAGE_CHECK(m);
+               /*
+                * grab a snapshot of the object and offset this
+                * page is tabled in so that we can relookup this
+                * page after we've taken the object lock - these
+                * fields are stable while we hold the page queues lock
+                * but as soon as we drop it, there is nothing to keep
+                * this page in this object... we hold an activity_in_progress
+                * on this object which will keep it from terminating
+                */
+               object = VM_PAGE_OBJECT(m);
+               offset = m->vmp_offset;
+
+               m->vmp_q_state = VM_PAGE_NOT_ON_Q;
+               VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+
+               vm_page_unlock_queues();
+
+               vm_object_lock(object);
+
+               m = vm_page_lookup(object, offset);
+
+               if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
+                   !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
+                       /*
+                        * it's either the same page that someone else has
+                        * started cleaning (or it's finished cleaning or
+                        * been put back on the pageout queue), or
+                        * the page has been freed or we have found a
+                        * new page at this offset... in all of these cases
+                        * we merely need to release the activity_in_progress
+                        * we took when we put the page on the pageout queue
+                        */
+                       vm_object_activity_end(object);
+                       vm_object_unlock(object);
+
+                       vm_page_lockspin_queues();
+                       continue;
+               }
+               pager = object->pager;
+
+               if (pager == MEMORY_OBJECT_NULL) {
+                       /*
+                        * This pager has been destroyed by either
+                        * memory_object_destroy or vm_object_destroy, and
+                        * so there is nowhere for the page to go.
+                        */
+                       if (m->vmp_free_when_done) {
+                               /*
+                                * Just free the page... VM_PAGE_FREE takes
+                                * care of cleaning up all the state...
+                                * including doing the vm_pageout_throttle_up
                                 */
-reactivate_page:
+                               VM_PAGE_FREE(m);
+                       } else {
+                               vm_page_lockspin_queues();
+
+                               vm_pageout_throttle_up(m);
                                vm_page_activate(m);
-                               VM_STAT_INCR(reactivations);
 
-                               vm_pageout_inactive_used++;
-                               inactive_burst_count = 0;
+                               vm_page_unlock_queues();
 
-                                goto done_with_inactivepage;
+                               /*
+                                *      And we are done with it.
+                                */
                        }
-                       /* 
-                        * Make sure we call pmap_get_refmod() if it
-                        * wasn't already called just above, to update
-                        * the dirty bit.
-                        */
-                       if ((refmod_state == -1) && !m->dirty && m->pmapped) {
-                               refmod_state = pmap_get_refmod(m->phys_page);
-                               if (refmod_state & VM_MEM_MODIFIED)
-                                       m->dirty = TRUE;
+                       vm_object_activity_end(object);
+                       vm_object_unlock(object);
+
+                       vm_page_lockspin_queues();
+                       continue;
+               }
+#if 0
+               /*
+                * we don't hold the page queue lock
+                * so this check isn't safe to make
+                */
+               VM_PAGE_CHECK(m);
+#endif
+               /*
+                * give back the activity_in_progress reference we
+                * took when we queued up this page and replace it
+                * with a paging_in_progress reference that will
+                * also keep the paging offset from changing and
+                * prevent the object from terminating
+                */
+               vm_object_activity_end(object);
+               vm_object_paging_begin(object);
+               vm_object_unlock(object);
+
+               /*
+                * Send the data to the pager.
+                * any pageout clustering happens there
+                */
+               memory_object_data_return(pager,
+                   m->vmp_offset + object->paging_offset,
+                   PAGE_SIZE,
+                   NULL,
+                   NULL,
+                   FALSE,
+                   FALSE,
+                   0);
+
+               vm_object_lock(object);
+               vm_object_paging_end(object);
+               vm_object_unlock(object);
+
+               vm_pageout_io_throttle();
+
+               vm_page_lockspin_queues();
+       }
+       q->pgo_busy = FALSE;
+       q->pgo_idle = TRUE;
+
+       assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
+       vm_page_unlock_queues();
+
+       thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
+       /*NOTREACHED*/
+}
+
+
+#define         MAX_FREE_BATCH          32
+uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
+                                     * this thread.
+                                     */
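+
+/*
+ * Pages compressed by the loop below are collected on a thread-local
+ * singly-linked list (chained through vmp_snext) and handed to
+ * vm_page_free_list() in batches of MAX_FREE_BATCH, so the free-page
+ * queues are touched once per batch rather than once per page.
+ */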
+
+
+void
+vm_pageout_iothread_internal_continue(struct cq *);
+void
+vm_pageout_iothread_internal_continue(struct cq *cq)
+{
+       struct vm_pageout_queue *q;
+       vm_page_t       m = NULL;
+       boolean_t       pgo_draining;
+       vm_page_t   local_q;
+       int         local_cnt;
+       vm_page_t   local_freeq = NULL;
+       int         local_freed = 0;
+       int         local_batch_size;
+#if DEVELOPMENT || DEBUG
+       int       ncomps = 0;
+       boolean_t marked_active = FALSE;
+#endif
+       KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+       q = cq->q;
+#if __AMP__
+       if (vm_compressor_ebound && (vm_pageout_state.vm_compressor_thread_count > 1)) {
+               local_batch_size = (q->pgo_maxlaundry >> 3);
+               local_batch_size = MAX(local_batch_size, 16);
+       } else {
+               local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
+       }
+#else
+       local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
+#endif
+
+#if RECORD_THE_COMPRESSED_DATA
+       if (q->pgo_laundry) {
+               c_compressed_record_init();
+       }
+#endif
+       while (TRUE) {
+               int     pages_left_on_q = 0;
+
+               local_cnt = 0;
+               local_q = NULL;
+
+               KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+               vm_page_lock_queues();
+#if DEVELOPMENT || DEBUG
+               if (marked_active == FALSE) {
+                       vmct_active++;
+                       vmct_state[cq->id] = VMCT_ACTIVE;
+                       marked_active = TRUE;
+                       if (vmct_active == 1) {
+                               vm_compressor_epoch_start = mach_absolute_time();
                        }
-                       forced_reclaim = TRUE;
+               }
+#endif
+               KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+               KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
+
+               while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
+                       vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
+                       assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
+                       VM_PAGE_CHECK(m);
+
+                       m->vmp_q_state = VM_PAGE_NOT_ON_Q;
+                       VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+                       m->vmp_laundry = FALSE;
+
+                       m->vmp_snext = local_q;
+                       local_q = m;
+                       local_cnt++;
+               }
+               if (local_q == NULL) {
+                       break;
+               }
+
+               q->pgo_busy = TRUE;
+
+               if ((pgo_draining = q->pgo_draining) == FALSE) {
+                       vm_pageout_throttle_up_batch(q, local_cnt);
+                       pages_left_on_q = q->pgo_laundry;
                } else {
-                       forced_reclaim = FALSE;
+                       pages_left_on_q = q->pgo_laundry - local_cnt;
+               }
+
+               vm_page_unlock_queues();
+
+#if !RECORD_THE_COMPRESSED_DATA
+               if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
+                       thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
+               }
+#endif
+               KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
+
+               while (local_q) {
+                       KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
+
+                       m = local_q;
+                       local_q = m->vmp_snext;
+                       m->vmp_snext = NULL;
+
+                       if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
+#if DEVELOPMENT || DEBUG
+                               ncomps++;
+#endif
+                               KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);
+
+                               m->vmp_snext = local_freeq;
+                               local_freeq = m;
+                               local_freed++;
+
+                               if (local_freed >= MAX_FREE_BATCH) {
+                                       OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+                                       vm_page_free_list(local_freeq, TRUE);
+
+                                       local_freeq = NULL;
+                                       local_freed = 0;
+                               }
+                       }
+#if !CONFIG_JETSAM
+                       while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
+                               kern_return_t   wait_result;
+                               int             need_wakeup = 0;
+
+                               if (local_freeq) {
+                                       OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+                                       vm_page_free_list(local_freeq, TRUE);
+                                       local_freeq = NULL;
+                                       local_freed = 0;
+
+                                       continue;
+                               }
+                               lck_mtx_lock_spin(&vm_page_queue_free_lock);
+
+                               if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
+                                       if (vm_page_free_wanted_privileged++ == 0) {
+                                               need_wakeup = 1;
+                                       }
+                                       wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
+
+                                       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+                                       if (need_wakeup) {
+                                               thread_wakeup((event_t)&vm_page_free_wanted);
+                                       }
+
+                                       if (wait_result == THREAD_WAITING) {
+                                               thread_block(THREAD_CONTINUE_NULL);
+                                       }
+                               } else {
+                                       lck_mtx_unlock(&vm_page_queue_free_lock);
+                               }
+                       }
+#endif
                }
+               if (local_freeq) {
+                       OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+                       vm_page_free_list(local_freeq, TRUE);
+                       local_freeq = NULL;
+                       local_freed = 0;
+               }
+               if (pgo_draining == TRUE) {
+                       vm_page_lockspin_queues();
+                       vm_pageout_throttle_up_batch(q, local_cnt);
+                       vm_page_unlock_queues();
+               }
+       }
+       KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+       /*
+        * queue lock is held and our q is empty
+        */
+       q->pgo_busy = FALSE;
+       q->pgo_idle = TRUE;
+
+       assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
+#if DEVELOPMENT || DEBUG
+       if (marked_active == TRUE) {
+               vmct_active--;
+               vmct_state[cq->id] = VMCT_IDLE;
+
+               if (vmct_active == 0) {
+                       vm_compressor_epoch_stop = mach_absolute_time();
+                       assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
+                           "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
+                           vm_compressor_epoch_start, vm_compressor_epoch_stop);
+                       /* This interval includes periods where one or more
+                        * compressor threads were preempted.
+                        */
+                       vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
+               }
+       }
+#endif
+       vm_page_unlock_queues();
+#if DEVELOPMENT || DEBUG
+       if (__improbable(vm_compressor_time_thread)) {
+               vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
+               vmct_stats.vmct_pages[cq->id] += ncomps;
+               vmct_stats.vmct_iterations[cq->id]++;
+               if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
+                       vmct_stats.vmct_maxpages[cq->id] = ncomps;
+               }
+               if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
+                       vmct_stats.vmct_minpages[cq->id] = ncomps;
+               }
+       }
+#endif
+
+       KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+       thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
+       /*NOTREACHED*/
+}
+
+
+kern_return_t
+vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
+{
+       vm_object_t     object;
+       memory_object_t pager;
+       int             compressed_count_delta;
+       kern_return_t   retval;
+
+       object = VM_PAGE_OBJECT(m);
+
+       assert(!m->vmp_free_when_done);
+       assert(!m->vmp_laundry);
 
-                XPR(XPR_VM_PAGEOUT,
-                "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
-                (integer_t)object, (integer_t)m->offset, (integer_t)m, 0,0);
+       pager = object->pager;
+
+       if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
+               KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
+
+               vm_object_lock(object);
 
                /*
-                * we've got a candidate page to steal...
-                *
-                * m->dirty is up to date courtesy of the
-                * preceding check for m->reference... if 
-                * we get here, then m->reference had to be
-                * FALSE (or possibly "reactivate_limit" was
-                 * exceeded), but in either case we called
-                 * pmap_get_refmod() and updated both
-                 * m->reference and m->dirty
-                *
-                * if it's dirty or precious we need to
-                * see if the target queue is throtttled
-                * it if is, we need to skip over it by moving it back
-                * to the end of the inactive queue
+                * If there is no memory object for the page, create
+                * one and hand it to the compression pager.
                 */
-               inactive_throttled = FALSE;
 
-               if (m->dirty || m->precious) {
-                       if (object->internal) {
-                               if (VM_PAGE_Q_THROTTLED(iq))
-                                       inactive_throttled = TRUE;
-                       } else if (VM_PAGE_Q_THROTTLED(eq)) {
-                               inactive_throttled = TRUE;
-                       }
+               if (!object->pager_initialized) {
+                       vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
                }
-               if (inactive_throttled == TRUE) {
-throttle_inactive:
-                       if (!IP_VALID(memory_manager_default) &&
-                               object->internal && 
-                               (object->purgable == VM_PURGABLE_DENY ||
-                                object->purgable == VM_PURGABLE_NONVOLATILE ||
-                                object->purgable == VM_PURGABLE_VOLATILE )) {
-                               queue_enter(&vm_page_queue_throttled, m,
-                                           vm_page_t, pageq);
-                               m->throttled = TRUE;
-                               vm_page_throttled_count++;
-                       } else {
-                               if (m->zero_fill) {
-                                       queue_enter(&vm_page_queue_zf, m,
-                                                   vm_page_t, pageq);
-                                       vm_zf_queue_count++;
-                               } else 
-                                       queue_enter(&vm_page_queue_inactive, m,
-                                                   vm_page_t, pageq);
-                               m->inactive = TRUE;
-                               if (!m->fictitious) {
-                                       vm_page_inactive_count++;
-                                       token_new_pagecount++;
-                               }
-                       }
-                       vm_pageout_scan_inactive_throttled++;
-                       goto done_with_inactivepage;
+               if (!object->pager_initialized) {
+                       vm_object_compressor_pager_create(object);
                }
 
-               /*
-                * we've got a page that we can steal...
-                * eliminate all mappings and make sure
-                * we have the up-to-date modified state
-                * first take the page BUSY, so that no new
-                * mappings can be made
-                */
-               m->busy = TRUE;
-               
-               /*
-                * if we need to do a pmap_disconnect then we
-                * need to re-evaluate m->dirty since the pmap_disconnect
-                * provides the true state atomically... the 
-                * page was still mapped up to the pmap_disconnect
-                * and may have been dirtied at the last microsecond
-                *
-                * we also check for the page being referenced 'late'
-                * if it was, we first need to do a WAKEUP_DONE on it
-                * since we already set m->busy = TRUE, before 
-                * going off to reactivate it
-                *
-                * Note that if 'pmapped' is FALSE then the page is not
-                * and has not been in any map, so there is no point calling
-                * pmap_disconnect().  m->dirty and/or m->reference could
-                * have been set in anticipation of likely usage of the page.
-                */
-               if (m->pmapped == TRUE) {
-                       refmod_state = pmap_disconnect(m->phys_page);
-
-                       if (refmod_state & VM_MEM_MODIFIED)
-                               m->dirty = TRUE;
-                       if (refmod_state & VM_MEM_REFERENCED) {
-                               
-                               /* If m->reference is already set, this page must have
-                                * already failed the reactivate_limit test, so don't
-                                * bump the counts twice.
-                                */
-                               if ( ! m->reference ) {
-                                       m->reference = TRUE;
-                                       if (forced_reclaim ||
-                                           ++reactivated_this_call >= reactivate_limit)
-                                               vm_pageout_reactivation_limit_exceeded++;
-                                       else {
-                                               PAGE_WAKEUP_DONE(m);
-                                               goto reactivate_page;
-                                       }
-                               }
-                       }
-               }
-               /*
-                * reset our count of pages that have been reclaimed 
-                * since the last page was 'stolen'
-                */
-               inactive_reclaim_run = 0;
+               pager = object->pager;
 
-               /*
-                *      If it's clean and not precious, we can free the page.
-                */
-               if (!m->dirty && !m->precious) {
-                       vm_pageout_inactive_clean++;
-                       goto reclaim_page;
+               if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
+                       /*
+                        * Still no pager for the object,
+                        * or the pager has been destroyed.
+                        * Reactivate the page.
+                        *
+                        * Should only happen if there is no
+                        * compression pager
+                        */
+                       PAGE_WAKEUP_DONE(m);
+
+                       vm_page_lockspin_queues();
+                       vm_page_activate(m);
+                       VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
+                       vm_page_unlock_queues();
+
+                       /*
+                        *      And we are done with it.
+                        */
+                       vm_object_activity_end(object);
+                       vm_object_unlock(object);
+
+                       return KERN_FAILURE;
                }
+               vm_object_unlock(object);
+
+               KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
+       }
+       assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
+       assert(object->activity_in_progress > 0);
+
+       retval = vm_compressor_pager_put(
+               pager,
+               m->vmp_offset + object->paging_offset,
+               VM_PAGE_GET_PHYS_PAGE(m),
+               current_chead,
+               scratch_buf,
+               &compressed_count_delta);
+
+       vm_object_lock(object);
 
+       assert(object->activity_in_progress > 0);
+       assert(VM_PAGE_OBJECT(m) == object);
+       assert( !VM_PAGE_WIRED(m));
+
+       vm_compressor_pager_count(pager,
+           compressed_count_delta,
+           FALSE,                       /* shared_lock */
+           object);
+
+       if (retval == KERN_SUCCESS) {
                /*
-                * The page may have been dirtied since the last check
-                * for a throttled target queue (which may have been skipped
-                * if the page was clean then).  With the dirty page
-                * disconnected here, we can make one final check.
+                * If the object is purgeable, its owner's
+                * purgeable ledgers will be updated in
+                * vm_page_remove() but the page still
+                * contributes to the owner's memory footprint,
+                * so account for it as such.
                 */
-               {
-                       boolean_t disconnect_throttled = FALSE;
-                       if (object->internal) {
-                               if (VM_PAGE_Q_THROTTLED(iq))
-                                       disconnect_throttled = TRUE;
-                       } else if (VM_PAGE_Q_THROTTLED(eq)) {
-                               disconnect_throttled = TRUE;
-                       }
+               if ((object->purgable != VM_PURGABLE_DENY ||
+                   object->vo_ledger_tag) &&
+                   object->vo_owner != NULL) {
+                       /* one more compressed purgeable/tagged page */
+                       vm_object_owner_compressed_update(object,
+                           +1);
+               }
+               VM_STAT_INCR(compressions);
 
-                       if (disconnect_throttled == TRUE) {
-                               PAGE_WAKEUP_DONE(m);
-                               goto throttle_inactive;
-                       }
+               if (m->vmp_tabled) {
+                       vm_page_remove(m, TRUE);
                }
+       } else {
+               PAGE_WAKEUP_DONE(m);
 
-               vm_pageout_cluster(m);
+               vm_page_lockspin_queues();
 
-               vm_pageout_inactive_dirty++;
+               vm_page_activate(m);
+               vm_pageout_vminfo.vm_compressor_failed++;
 
-               inactive_burst_count = 0;
+               vm_page_unlock_queues();
+       }
+       vm_object_activity_end(object);
+       vm_object_unlock(object);
 
-done_with_inactivepage:
-               if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
+       return retval;
+}
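
For reference, the calling contract established above: when vm_pageout_compress_page() returns KERN_SUCCESS the page has been detached from its object (vm_page_remove, if it was tabled) and ownership passes to the caller, which is expected to free it; on any other return the page has already been reactivated inside the callee and must be left alone. A minimal caller-side sketch, assuming the same locally chained free list the compressor-thread loop above uses (all names are from this file):

        vm_page_t local_freeq = NULL;

        if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
                /* page is no longer tabled in its object; chain it for a batched free */
                m->vmp_snext = local_freeq;
                local_freeq = m;
        }
        /* on failure the callee already reactivated the page; do not free it */

        if (local_freeq) {
                vm_page_free_list(local_freeq, TRUE);
        }
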
 
-                       if (object != NULL) {
-                               vm_object_unlock(object);
-                               object = NULL;
-                               vm_pageout_scan_wants_object = VM_OBJECT_NULL;
-                       }
-                       if (local_freeq) {
-                               vm_page_free_list(local_freeq);
-                               
-                               local_freeq = NULL;
-                               local_freed = 0;
-                       }
-                       mutex_yield(&vm_page_queue_lock);
 
-                       delayed_unlock = 1;
+static void
+vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
+{
+       uint32_t        policy;
+
+       if (hibernate_cleaning_in_progress == TRUE) {
+               req_lowpriority = FALSE;
+       }
+
+       if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
+               vm_page_unlock_queues();
+
+               if (req_lowpriority == TRUE) {
+                       policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
+                       DTRACE_VM(laundrythrottle);
+               } else {
+                       policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
+                       DTRACE_VM(laundryunthrottle);
                }
-               /*
-                * back to top of pageout scan loop
-                */
+               proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
+                   TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
+
+               vm_page_lock_queues();
+               eq->pgo_lowpriority = req_lowpriority;
        }
 }
 
 
-int vm_page_free_count_init;
-
-void
-vm_page_free_reserve(
-       int pages)
+static void
+vm_pageout_iothread_external(void)
 {
-       int             free_after_reserve;
+       thread_t        self = current_thread();
 
-       vm_page_free_reserved += pages;
+       self->options |= TH_OPT_VMPRIV;
 
-       free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
+       DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
 
-       vm_page_free_min = vm_page_free_reserved +
-               VM_PAGE_FREE_MIN(free_after_reserve);
+       proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
+           TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
 
-       if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
-               vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
+       vm_page_lock_queues();
 
-       vm_page_free_target = vm_page_free_reserved +
-               VM_PAGE_FREE_TARGET(free_after_reserve);
+       vm_pageout_queue_external.pgo_tid = self->thread_id;
+       vm_pageout_queue_external.pgo_lowpriority = TRUE;
+       vm_pageout_queue_external.pgo_inited = TRUE;
 
-       if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
-               vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
+       vm_page_unlock_queues();
 
-       if (vm_page_free_target < vm_page_free_min + 5)
-               vm_page_free_target = vm_page_free_min + 5;
+       vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
 
+       /*NOTREACHED*/
 }
 
-/*
- *     vm_pageout is the high level pageout daemon.
- */
 
-void
-vm_pageout_continue(void)
+static void
+vm_pageout_iothread_internal(struct cq *cq)
 {
-       DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
-       vm_pageout_scan_event_counter++;
-       vm_pageout_scan();
-       /* we hold vm_page_queue_free_lock now */
-       assert(vm_page_free_wanted == 0);
-       assert(vm_page_free_wanted_privileged == 0);
-       assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
-       mutex_unlock(&vm_page_queue_free_lock);
+       thread_t        self = current_thread();
+
+       self->options |= TH_OPT_VMPRIV;
+
+       vm_page_lock_queues();
+
+       vm_pageout_queue_internal.pgo_tid = self->thread_id;
+       vm_pageout_queue_internal.pgo_lowpriority = TRUE;
+       vm_pageout_queue_internal.pgo_inited = TRUE;
+
+       vm_page_unlock_queues();
+
+       if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
+               thread_vm_bind_group_add();
+       }
+
+#if CONFIG_THREAD_GROUPS
+       thread_group_vm_add();
+#endif /* CONFIG_THREAD_GROUPS */
+
+#if __AMP__
+       if (vm_compressor_ebound) {
+               /*
+                * Use the soft bound option for vm_compressor to allow it to run on
+                * P-cores if E-cluster is unavailable.
+                */
+               thread_bind_cluster_type(self, 'E', true);
+       }
+#endif /* __AMP__ */
+
+       thread_set_thread_name(current_thread(), "VM_compressor");
+#if DEVELOPMENT || DEBUG
+       vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
+#endif
+       vm_pageout_iothread_internal_continue(cq);
 
-       counter(c_vm_pageout_block++);
-       thread_block((thread_continue_t)vm_pageout_continue);
        /*NOTREACHED*/
 }
 
+kern_return_t
+vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
+{
+       if (OSCompareAndSwapPtr(NULL, ptrauth_nop_cast(void *, func), (void * volatile *) &consider_buffer_cache_collect)) {
+               return KERN_SUCCESS;
+       } else {
+               return KERN_FAILURE; /* Already set */
+       }
+}
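
A minimal usage sketch for the one-shot registration above; the callback and init routine names are hypothetical placeholders, and only vm_set_buffer_cleanup_callout() itself comes from this file:

/* hypothetical subsystem that can shed cached buffers under memory pressure */
static boolean_t
my_cache_collect(int all)
{
        (void)all;
        /* drop caches here; return TRUE if a large zone element was freed */
        return FALSE;
}

static void
my_cache_init(void)
{
        kern_return_t kr = vm_set_buffer_cleanup_callout(my_cache_collect);
        assert(kr == KERN_SUCCESS);     /* KERN_FAILURE: a callout was already registered */
}
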
 
-/*
- * must be called with the
- * queues and object locks held
- */
-static void
-vm_pageout_queue_steal(vm_page_t m)
+extern boolean_t        memorystatus_manual_testing_on;
+extern unsigned int     memorystatus_level;
+
+
+#if VM_PRESSURE_EVENTS
+
+boolean_t vm_pressure_events_enabled = FALSE;
+
+void
+vm_pressure_response(void)
 {
-        struct vm_pageout_queue *q;
+       vm_pressure_level_t     old_level = kVMPressureNormal;
+       int                     new_level = -1;
+       unsigned int            total_pages;
+       uint64_t                available_memory = 0;
+
+       if (vm_pressure_events_enabled == FALSE) {
+               return;
+       }
 
-       if (m->object->internal == TRUE)
-               q = &vm_pageout_queue_internal;
-       else
-               q = &vm_pageout_queue_external;
+#if CONFIG_EMBEDDED
 
-       m->laundry = FALSE;
-       m->pageout_queue = FALSE;
-       queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
+       available_memory = (uint64_t) memorystatus_available_pages;
 
-       m->pageq.next = NULL;
-       m->pageq.prev = NULL;
+#else /* CONFIG_EMBEDDED */
 
-       vm_object_paging_end(m->object);
+       available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
+       memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
 
-       q->pgo_laundry--;
-}
+#endif /* CONFIG_EMBEDDED */
 
+       total_pages = (unsigned int) atop_64(max_mem);
+#if CONFIG_SECLUDED_MEMORY
+       total_pages -= vm_page_secluded_count;
+#endif /* CONFIG_SECLUDED_MEMORY */
+       memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
 
-#ifdef FAKE_DEADLOCK
+       if (memorystatus_manual_testing_on) {
+               return;
+       }
 
-#define FAKE_COUNT     5000
+       old_level = memorystatus_vm_pressure_level;
 
-int internal_count = 0;
-int fake_deadlock = 0;
+       switch (memorystatus_vm_pressure_level) {
+       case kVMPressureNormal:
+       {
+               if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
+                       new_level = kVMPressureCritical;
+               } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
+                       new_level = kVMPressureWarning;
+               }
+               break;
+       }
 
-#endif
+       case kVMPressureWarning:
+       case kVMPressureUrgent:
+       {
+               if (VM_PRESSURE_WARNING_TO_NORMAL()) {
+                       new_level = kVMPressureNormal;
+               } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
+                       new_level = kVMPressureCritical;
+               }
+               break;
+       }
 
-static void
-vm_pageout_iothread_continue(struct vm_pageout_queue *q)
+       case kVMPressureCritical:
+       {
+               if (VM_PRESSURE_WARNING_TO_NORMAL()) {
+                       new_level = kVMPressureNormal;
+               } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
+                       new_level = kVMPressureWarning;
+               }
+               break;
+       }
+
+       default:
+               return;
+       }
+
+       if (new_level != -1) {
+               memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
+
+               if (new_level != (int) old_level) {
+                       VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
+                           new_level, old_level, 0, 0);
+               }
+
+               if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) {
+                       if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
+                               thread_wakeup(&vm_pressure_thread);
+                       }
+
+                       if (old_level != memorystatus_vm_pressure_level) {
+                               thread_wakeup(&vm_pageout_state.vm_pressure_changed);
+                       }
+               }
+       }
+}
+#endif /* VM_PRESSURE_EVENTS */
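
As a concrete, illustrative instance of the level computation above: on a machine with max_mem of 4 GiB and 4 KiB pages, total_pages is 1,048,576; if the available (non-compressed) memory comes to 262,144 pages, memorystatus_level works out to (262,144 * 100) / 1,048,576 = 25, i.e. the percentage of memory still available that the VM_PRESSURE_* transition macros are evaluated against.
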
+
+/*
+ * Function called by a kernel thread to either get the current pressure level or
+ * wait until memory pressure changes from a given level.
+ */
+kern_return_t
+mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
 {
-       vm_page_t       m = NULL;
-       vm_object_t     object;
-       boolean_t       need_wakeup;
-       memory_object_t pager;
-       thread_t        self = current_thread();
+#if !VM_PRESSURE_EVENTS
 
-       if ((vm_pageout_internal_iothread != THREAD_NULL)
-           && (self == vm_pageout_external_iothread )
-           && (self->options & TH_OPT_VMPRIV))
-               self->options &= ~TH_OPT_VMPRIV;
+       return KERN_FAILURE;
 
-       vm_page_lockspin_queues();
+#else /* VM_PRESSURE_EVENTS */
 
-        while ( !queue_empty(&q->pgo_pending) ) {
+       wait_result_t       wr = 0;
+       vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
 
-                  q->pgo_busy = TRUE;
-                  queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
-                  m->pageout_queue = FALSE;
-                  vm_page_unlock_queues();
+       if (pressure_level == NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-                  m->pageq.next = NULL;
-                  m->pageq.prev = NULL;
-#ifdef FAKE_DEADLOCK
-                  if (q == &vm_pageout_queue_internal) {
-                          vm_offset_t addr;
-                          int  pg_count;
+       if (*pressure_level == kVMPressureJetsam) {
+               if (!wait_for_pressure) {
+                       return KERN_INVALID_ARGUMENT;
+               }
 
-                          internal_count++;
+               lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
+               wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
+                   THREAD_INTERRUPTIBLE);
+               if (wr == THREAD_WAITING) {
+                       ++memorystatus_jetsam_fg_band_waiters;
+                       lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
+                       wr = thread_block(THREAD_CONTINUE_NULL);
+               } else {
+                       lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
+               }
+               if (wr != THREAD_AWAKENED) {
+                       return KERN_ABORTED;
+               }
+               *pressure_level = kVMPressureJetsam;
+               return KERN_SUCCESS;
+       }
 
-                          if ((internal_count == FAKE_COUNT)) {
+       if (wait_for_pressure == TRUE) {
+               while (old_level == *pressure_level) {
+                       wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
+                           THREAD_INTERRUPTIBLE);
+                       if (wr == THREAD_WAITING) {
+                               wr = thread_block(THREAD_CONTINUE_NULL);
+                       }
+                       if (wr == THREAD_INTERRUPTED) {
+                               return KERN_ABORTED;
+                       }
 
-                                  pg_count = vm_page_free_count + vm_page_free_reserved;
+                       if (wr == THREAD_AWAKENED) {
+                               old_level = memorystatus_vm_pressure_level;
+                       }
+               }
+       }
 
-                                  if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
-                                          kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
-                                  }
-                                  internal_count = 0;
-                                  fake_deadlock++;
-                          }
-                  }
-#endif
-                  object = m->object;
-
-                  vm_object_lock(object);
-
-                  if (!object->pager_initialized) {
-
-                          /*
-                           *   If there is no memory object for the page, create
-                           *   one and hand it to the default pager.
-                           */
-
-                          if (!object->pager_initialized)
-                                  vm_object_collapse(object,
-                                                     (vm_object_offset_t) 0,
-                                                     TRUE);
-                          if (!object->pager_initialized)
-                                  vm_object_pager_create(object);
-                          if (!object->pager_initialized) {
-                                  /*
-                                   *   Still no pager for the object.
-                                   *   Reactivate the page.
-                                   *
-                                   *   Should only happen if there is no
-                                   *   default pager.
-                                   */
-                                  m->list_req_pending = FALSE;
-                                  m->cleaning = FALSE;
-                                  m->pageout = FALSE;
-
-                                  vm_page_lockspin_queues();
-                                  vm_page_unwire(m);
-                                  vm_pageout_throttle_up(m);
-                                  vm_pageout_dirty_no_pager++;
-                                  vm_page_activate(m);
-                                  vm_page_unlock_queues();
-
-                                  /*
-                                   *   And we are done with it.
-                                   */
-                                  PAGE_WAKEUP_DONE(m);
-
-                                  vm_object_paging_end(object);
-                                  vm_object_unlock(object);
-
-                                  vm_page_lockspin_queues();
-                                  continue;
-                          }
-                  }
-                  pager = object->pager;
-                  if (pager == MEMORY_OBJECT_NULL) {
-                          /*
-                           * This pager has been destroyed by either
-                           * memory_object_destroy or vm_object_destroy, and
-                           * so there is nowhere for the page to go.
-                           * Just free the page... VM_PAGE_FREE takes
-                           * care of cleaning up all the state...
-                           * including doing the vm_pageout_throttle_up
-                           */
-
-                          VM_PAGE_FREE(m);
-
-                          vm_object_paging_end(object);
-                          vm_object_unlock(object);
-
-                          vm_page_lockspin_queues();
-                          continue;
-                  }
-                  vm_object_unlock(object);
-                  /*
-                   * we expect the paging_in_progress reference to have
-                   * already been taken on the object before it was added
-                   * to the appropriate pageout I/O queue... this will
-                   * keep the object from being terminated and/or the 
-                   * paging_offset from changing until the I/O has 
-                   * completed... therefore no need to lock the object to
-                   * pull the paging_offset from it.
-                   *
-                   * Send the data to the pager.
-                   * any pageout clustering happens there
-                   */
-                  memory_object_data_return(pager,
-                                            m->offset + object->paging_offset,
-                                            PAGE_SIZE,
-                                            NULL,
-                                            NULL,
-                                            FALSE,
-                                            FALSE,
-                                            0);
-
-                  vm_object_lock(object);
-                  vm_object_paging_end(object);
-                  vm_object_unlock(object);
-
-                  vm_page_lockspin_queues();
-       }
-       assert_wait((event_t) q, THREAD_UNINT);
-
-
-       if (q->pgo_throttled == TRUE && !VM_PAGE_Q_THROTTLED(q)) {
-               q->pgo_throttled = FALSE;
-               need_wakeup = TRUE;
-       } else
-               need_wakeup = FALSE;
+       *pressure_level = old_level;
+       return KERN_SUCCESS;
+#endif /* VM_PRESSURE_EVENTS */
+}
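
A hedged sketch of how an in-kernel caller could block on a pressure change using the routine above; the surrounding code is illustrative, while the routine, its return codes, and kVMPressureNormal are from this file:

        unsigned int level = kVMPressureNormal;

        /* wait_for_pressure == TRUE: block until the level differs from 'level' */
        if (mach_vm_pressure_level_monitor(TRUE, &level) == KERN_SUCCESS) {
                /* 'level' now holds the current memorystatus_vm_pressure_level */
        } else {
                /* KERN_ABORTED: the wait was interrupted */
        }
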
 
-       q->pgo_busy = FALSE;
-       q->pgo_idle = TRUE;
-       vm_page_unlock_queues();
+#if VM_PRESSURE_EVENTS
+void
+vm_pressure_thread(void)
+{
+       static boolean_t thread_initialized = FALSE;
 
-       if (need_wakeup == TRUE)
-               thread_wakeup((event_t) &q->pgo_laundry);
+       if (thread_initialized == TRUE) {
+               vm_pageout_state.vm_pressure_thread_running = TRUE;
+               consider_vm_pressure_events();
+               vm_pageout_state.vm_pressure_thread_running = FALSE;
+       }
 
-       thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) &q->pgo_pending);
-       /*NOTREACHED*/
+       thread_set_thread_name(current_thread(), "VM_pressure");
+       thread_initialized = TRUE;
+       assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
+       thread_block((thread_continue_t)vm_pressure_thread);
 }
+#endif /* VM_PRESSURE_EVENTS */
 
 
-static void
-vm_pageout_iothread_external(void)
+/*
+ * called once per second via "compute_averages"
+ */
+void
+compute_pageout_gc_throttle(__unused void *arg)
 {
-       thread_t        self = current_thread();
+       if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
+               vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;
 
-       self->options |= TH_OPT_VMPRIV;
+               thread_wakeup((event_t) &vm_pageout_garbage_collect);
+       }
+}
+
+/*
+ * vm_pageout_garbage_collect can also be called when the zone allocator needs
+ * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
+ * jetsams. We need to check if the zone map size is above its jetsam limit to
+ * decide if this was indeed the case.
+ *
+ * We need to do this on a different thread because of the following reasons:
+ *
+ * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
+ * itself causing the system to hang. We perform synchronous jetsams if we're
+ * leaking in the VM map entries zone, so the leaking process could be doing a
+ * zalloc for a VM map entry while holding its vm_map lock, when it decides to
+ * jetsam itself. We also need the vm_map lock on the process termination path,
+ * which would now lead the dying process to deadlock against itself.
+ *
+ * 2. The jetsam path might need to allocate zone memory itself. We could try
+ * using the non-blocking variant of zalloc for this path, but we can still
+ * end up trying to do a kernel_memory_allocate when the zone maps are almost
+ * full.
+ */
+
+void
+vm_pageout_garbage_collect(int collect)
+{
+       if (collect) {
+               if (is_zone_map_nearing_exhaustion()) {
+                       /*
+                        * Woken up by the zone allocator for zone-map-exhaustion jetsams.
+                        *
+                        * Bail out after calling zone_gc (which triggers the
+                        * zone-map-exhaustion jetsams). If we fall through, the subsequent
+                        * operations that clear out a bunch of caches might allocate zone
+ * memory themselves (e.g. vm_map operations would need VM map
+                        * entries). Since the zone map is almost full at this point, we
+                        * could end up with a panic. We just need to quickly jetsam a
+                        * process and exit here.
+                        *
+                        * It could so happen that we were woken up to relieve memory
+                        * pressure and the zone map also happened to be near its limit at
+                        * the time, in which case we'll skip out early. But that should be
+                        * ok; if memory pressure persists, the thread will simply be woken
+                        * up again.
+                        */
+                       consider_zone_gc(TRUE);
+               } else {
+                       /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
+                       boolean_t buf_large_zfree = FALSE;
+                       boolean_t first_try = TRUE;
+
+                       stack_collect();
+
+                       consider_machine_collect();
+                       mbuf_drain(FALSE);
+
+                       do {
+                               if (consider_buffer_cache_collect != NULL) {
+                                       buf_large_zfree = (*consider_buffer_cache_collect)(0);
+                               }
+                               if (first_try == TRUE || buf_large_zfree == TRUE) {
+                                       /*
+                                        * consider_zone_gc should be last, because the other operations
+                                        * might return memory to zones.
+                                        */
+                                       consider_zone_gc(FALSE);
+                               }
+                               first_try = FALSE;
+                       } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
+
+                       consider_machine_adjust();
+               }
+       }
+
+       assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
 
-       vm_pageout_iothread_continue(&vm_pageout_queue_external);
+       thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
        /*NOTREACHED*/
 }
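
The other half of the zone-map-exhaustion handshake described above lives in the zone allocator; a hedged, illustrative sketch of what that wakeup amounts to (not the literal zalloc call site):

        /* in the allocator's slow path, when the zone map is close to full */
        if (is_zone_map_nearing_exhaustion()) {
                /* wake the GC thread; it will run consider_zone_gc(TRUE) and jetsam */
                thread_wakeup((event_t) &vm_pageout_garbage_collect);
        }
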
 
 
-static void
-vm_pageout_iothread_internal(void)
-{
-       thread_t        self = current_thread();
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
-       self->options |= TH_OPT_VMPRIV;
 
-       vm_pageout_iothread_continue(&vm_pageout_queue_internal);
-       /*NOTREACHED*/
-}
 
-static void
-vm_pageout_garbage_collect(int collect)
+void
+vm_set_restrictions(unsigned int num_cpus)
 {
-       if (collect) {
-               stack_collect();
+       int vm_restricted_to_single_processor = 0;
 
-               /*
-                * consider_zone_gc should be last, because the other operations
-                * might return memory to zones.
-                */
-               consider_machine_collect();
-               consider_zone_gc();
+       if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
+               kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
+               vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
+       } else {
+               assert(num_cpus > 0);
 
-               consider_machine_adjust();
+               if (num_cpus <= 3) {
+                       /*
+                        * on systems with a limited number of CPUs, bind the
+                        * 4 major threads that can free memory and that tend to use
+                        * a fair bit of CPU under pressured conditions to a single processor.
+                        * This ensures that these threads don't hog all of the available CPUs
+                        * (important for camera launch), while allowing them to run independently
+                        * with respect to locks... the 4 threads are
+                        * vm_pageout_scan,  vm_pageout_iothread_internal (compressor),
+                        * vm_compressor_swap_trigger_thread (minor and major compactions),
+                        * memorystatus_thread (jetsams).
+                        *
+                        * the first time the thread is run, it is responsible for checking the
+                        * state of vm_restricted_to_single_processor, and if TRUE it calls
+                        * thread_bind_master...  someday this should be replaced with a group
+                        * scheduling mechanism and KPI.
+                        */
+                       vm_pageout_state.vm_restricted_to_single_processor = TRUE;
+               } else {
+                       vm_pageout_state.vm_restricted_to_single_processor = FALSE;
+               }
        }
-
-       assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
-
-       thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
-       /*NOTREACHED*/
 }
 
-
-
 void
 vm_pageout(void)
 {
-       thread_t        self = current_thread();
-       thread_t        thread;
-       kern_return_t   result;
-       spl_t           s;
+       thread_t        self = current_thread();
+       thread_t        thread;
+       kern_return_t   result;
+       spl_t           s;
 
        /*
         * Set thread privileges.
         */
        s = splsched();
+
+       vm_pageout_scan_thread = self;
+
+#if CONFIG_VPS_DYNAMIC_PRIO
+
+       int             vps_dynprio_bootarg = 0;
+
+       if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) {
+               vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE);
+               kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled);
+       } else {
+               if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
+                       vps_dynamic_priority_enabled = TRUE;
+               } else {
+                       vps_dynamic_priority_enabled = FALSE;
+               }
+       }
+
+       if (vps_dynamic_priority_enabled) {
+               sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
+               thread_set_eager_preempt(self);
+       } else {
+               sched_set_kernel_thread_priority(self, BASEPRI_VM);
+       }
+
+#else /* CONFIG_VPS_DYNAMIC_PRIO */
+
+       vps_dynamic_priority_enabled = FALSE;
+       sched_set_kernel_thread_priority(self, BASEPRI_VM);
+
+#endif /* CONFIG_VPS_DYNAMIC_PRIO */
+
        thread_lock(self);
-       self->priority = BASEPRI_PREEMPT - 1;
-       set_sched_pri(self, self->priority);
+       self->options |= TH_OPT_VMPRIV;
        thread_unlock(self);
 
-       if (!self->reserved_stack)
+       if (!self->reserved_stack) {
                self->reserved_stack = self->kernel_stack;
+       }
+
+       if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
+           vps_dynamic_priority_enabled == FALSE) {
+               thread_vm_bind_group_add();
+       }
+
+
+#if CONFIG_THREAD_GROUPS
+       thread_group_vm_add();
+#endif /* CONFIG_THREAD_GROUPS */
+
+#if __AMP__
+       PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound));
+       if (vm_pgo_pbound) {
+               /*
+                * Use the soft bound option for vm pageout to allow it to run on
+                * E-cores if P-cluster is unavailable.
+                */
+               thread_bind_cluster_type(self, 'P', true);
+       }
+#endif /* __AMP__ */
 
        splx(s);
 
+       thread_set_thread_name(current_thread(), "VM_pageout_scan");
+
        /*
         *      Initialize some paging parameters.
         */
 
-       if (vm_pageout_idle_wait == 0)
-               vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
-
-       if (vm_pageout_burst_wait == 0)
-               vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
-
-       if (vm_pageout_empty_wait == 0)
-               vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
+       vm_pageout_state.vm_pressure_thread_running = FALSE;
+       vm_pageout_state.vm_pressure_changed = FALSE;
+       vm_pageout_state.memorystatus_purge_on_warning = 2;
+       vm_pageout_state.memorystatus_purge_on_urgent = 5;
+       vm_pageout_state.memorystatus_purge_on_critical = 8;
+       vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
+       vm_pageout_state.vm_page_speculative_percentage = 5;
+       vm_pageout_state.vm_page_speculative_target = 0;
+
+       vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
+       vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;
+
+       vm_pageout_state.vm_pageout_swap_wait = 0;
+       vm_pageout_state.vm_pageout_idle_wait = 0;
+       vm_pageout_state.vm_pageout_empty_wait = 0;
+       vm_pageout_state.vm_pageout_burst_wait = 0;
+       vm_pageout_state.vm_pageout_deadlock_wait = 0;
+       vm_pageout_state.vm_pageout_deadlock_relief = 0;
+       vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;
+
+       vm_pageout_state.vm_pageout_inactive = 0;
+       vm_pageout_state.vm_pageout_inactive_used = 0;
+       vm_pageout_state.vm_pageout_inactive_clean = 0;
+
+       vm_pageout_state.vm_memory_pressure = 0;
+       vm_pageout_state.vm_page_filecache_min = 0;
+#if CONFIG_JETSAM
+       vm_pageout_state.vm_page_filecache_min_divisor = 70;
+       vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
+#else
+       vm_pageout_state.vm_page_filecache_min_divisor = 27;
+       vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
+#endif
+       vm_pageout_state.vm_page_free_count_init = vm_page_free_count;
 
-       if (vm_pageout_deadlock_wait == 0)
-               vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
+       vm_pageout_state.vm_pageout_considered_page_last = 0;
 
-       if (vm_pageout_deadlock_relief == 0)
-               vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
+       if (vm_pageout_state.vm_pageout_swap_wait == 0) {
+               vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
+       }
 
-       if (vm_pageout_inactive_relief == 0)
-               vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
+       if (vm_pageout_state.vm_pageout_idle_wait == 0) {
+               vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
+       }
 
-       if (vm_pageout_burst_active_throttle == 0)
-               vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
+       if (vm_pageout_state.vm_pageout_burst_wait == 0) {
+               vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
+       }
 
-       if (vm_pageout_burst_inactive_throttle == 0)
-               vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
+       if (vm_pageout_state.vm_pageout_empty_wait == 0) {
+               vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
+       }
 
-       /*
-        * Set kernel task to low backing store privileged 
-        * status
-        */
-       task_lock(kernel_task);
-       kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
-       task_unlock(kernel_task);
+       if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
+               vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
+       }
 
-       vm_page_free_count_init = vm_page_free_count;
+       if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
+               vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
+       }
 
+       if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
+               vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
+       }
        /*
         * even if we've already called vm_page_free_reserve
         * call it again here to ensure that the targets are
@@ -2350,47 +4833,140 @@ vm_pageout(void)
         */
        if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
                vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
-       } else
+       } else {
                vm_page_free_reserve(0);
+       }
 
 
-       queue_init(&vm_pageout_queue_external.pgo_pending);
+       vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
        vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
        vm_pageout_queue_external.pgo_laundry = 0;
        vm_pageout_queue_external.pgo_idle = FALSE;
        vm_pageout_queue_external.pgo_busy = FALSE;
        vm_pageout_queue_external.pgo_throttled = FALSE;
+       vm_pageout_queue_external.pgo_draining = FALSE;
+       vm_pageout_queue_external.pgo_lowpriority = FALSE;
+       vm_pageout_queue_external.pgo_tid = -1;
+       vm_pageout_queue_external.pgo_inited = FALSE;
 
-       queue_init(&vm_pageout_queue_internal.pgo_pending);
+       vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
        vm_pageout_queue_internal.pgo_maxlaundry = 0;
        vm_pageout_queue_internal.pgo_laundry = 0;
        vm_pageout_queue_internal.pgo_idle = FALSE;
        vm_pageout_queue_internal.pgo_busy = FALSE;
        vm_pageout_queue_internal.pgo_throttled = FALSE;
-
+       vm_pageout_queue_internal.pgo_draining = FALSE;
+       vm_pageout_queue_internal.pgo_lowpriority = FALSE;
+       vm_pageout_queue_internal.pgo_tid = -1;
+       vm_pageout_queue_internal.pgo_inited = FALSE;
 
        /* internal pageout thread started when default pager registered first time */
        /* external pageout and garbage collection threads started here */
 
-       result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL, 
-                                             BASEPRI_PREEMPT - 1, 
-                                             &vm_pageout_external_iothread);
-       if (result != KERN_SUCCESS)
+       result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
+           BASEPRI_VM,
+           &vm_pageout_state.vm_pageout_external_iothread);
+       if (result != KERN_SUCCESS) {
                panic("vm_pageout_iothread_external: create failed");
-
-       thread_deallocate(vm_pageout_external_iothread);
+       }
+       thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread");
+       thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);
 
        result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
-                                             MINPRI_KERNEL, 
-                                             &thread);
-       if (result != KERN_SUCCESS)
+           BASEPRI_DEFAULT,
+           &thread);
+       if (result != KERN_SUCCESS) {
                panic("vm_pageout_garbage_collect: create failed");
+       }
+       thread_set_thread_name(thread, "VM_pageout_garbage_collect");
+       thread_deallocate(thread);
+
+#if VM_PRESSURE_EVENTS
+       result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
+           BASEPRI_DEFAULT,
+           &thread);
+
+       if (result != KERN_SUCCESS) {
+               panic("vm_pressure_thread: create failed");
+       }
 
        thread_deallocate(thread);
+#endif
 
        vm_object_reaper_init();
 
 
+       bzero(&vm_config, sizeof(vm_config));
+
+       switch (vm_compressor_mode) {
+       case VM_PAGER_DEFAULT:
+               printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
+               OS_FALLTHROUGH;
+
+       case VM_PAGER_COMPRESSOR_WITH_SWAP:
+               vm_config.compressor_is_present = TRUE;
+               vm_config.swap_is_present = TRUE;
+               vm_config.compressor_is_active = TRUE;
+               vm_config.swap_is_active = TRUE;
+               break;
+
+       case VM_PAGER_COMPRESSOR_NO_SWAP:
+               vm_config.compressor_is_present = TRUE;
+               vm_config.swap_is_present = TRUE;
+               vm_config.compressor_is_active = TRUE;
+               break;
+
+       case VM_PAGER_FREEZER_DEFAULT:
+               printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
+               OS_FALLTHROUGH;
+
+       case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
+               vm_config.compressor_is_present = TRUE;
+               vm_config.swap_is_present = TRUE;
+               break;
+
+       case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
+               vm_config.compressor_is_present = TRUE;
+               vm_config.swap_is_present = TRUE;
+               vm_config.compressor_is_active = TRUE;
+               vm_config.freezer_swap_is_active = TRUE;
+               break;
+
+       case VM_PAGER_NOT_CONFIGURED:
+               break;
+
+       default:
+               printf("unknown compressor mode - %x\n", vm_compressor_mode);
+               break;
+       }
+       if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+               vm_compressor_pager_init();
+       }
+
+#if VM_PRESSURE_EVENTS
+       vm_pressure_events_enabled = TRUE;
+#endif /* VM_PRESSURE_EVENTS */
+
+#if CONFIG_PHANTOM_CACHE
+       vm_phantom_cache_init();
+#endif
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+       printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
+           (uint64_t) vm_page_fake_buckets_start,
+           (uint64_t) vm_page_fake_buckets_end);
+       pmap_protect(kernel_pmap,
+           vm_page_fake_buckets_start,
+           vm_page_fake_buckets_end,
+           VM_PROT_READ);
+//     *(char *) vm_page_fake_buckets_start = 'x';     /* panic! */
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+#if VM_OBJECT_TRACKING
+       vm_object_tracking_init();
+#endif /* VM_OBJECT_TRACKING */
+
        vm_pageout_continue();
 
        /*
@@ -2417,134 +4993,404 @@ vm_pageout(void)
        /*NOTREACHED*/
 }
 
+
+
 kern_return_t
 vm_pageout_internal_start(void)
 {
-       kern_return_t result;
+       kern_return_t   result;
+       host_basic_info_data_t hinfo;
+       vm_offset_t     buf, bufsize;
+
+       assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+       mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+#define BSD_HOST 1
+       host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+
+       assert(hinfo.max_cpus > 0);
+
+#if CONFIG_EMBEDDED
+       vm_pageout_state.vm_compressor_thread_count = 1;
+#else
+       if (hinfo.max_cpus > 4) {
+               vm_pageout_state.vm_compressor_thread_count = 2;
+       } else {
+               vm_pageout_state.vm_compressor_thread_count = 1;
+       }
+#endif
+       PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
+           sizeof(vm_pageout_state.vm_compressor_thread_count));
+
+#if     __AMP__
+       PE_parse_boot_argn("vmcomp_ecluster", &vm_compressor_ebound, sizeof(vm_compressor_ebound));
+       if (vm_compressor_ebound) {
+               vm_pageout_state.vm_compressor_thread_count = 2;
+       }
+#endif
+       if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
+               vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
+       }
+       if (vm_pageout_state.vm_compressor_thread_count <= 0) {
+               vm_pageout_state.vm_compressor_thread_count = 1;
+       } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
+               vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
+       }
 
-       vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
-       result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
-       if (result == KERN_SUCCESS)
-               thread_deallocate(vm_pageout_internal_iothread);
+       vm_pageout_queue_internal.pgo_maxlaundry =
+           (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
+
+       PE_parse_boot_argn("vmpgoi_maxlaundry",
+           &vm_pageout_queue_internal.pgo_maxlaundry,
+           sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
+
+       bufsize = COMPRESSOR_SCRATCH_BUF_SIZE;
+       if (kernel_memory_allocate(kernel_map, &buf,
+           bufsize * vm_pageout_state.vm_compressor_thread_count,
+           0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
+               panic("vm_pageout_internal_start: Unable to allocate %zd bytes",
+                   (size_t)(bufsize * vm_pageout_state.vm_compressor_thread_count));
+       }
+
+       for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
+               ciq[i].id = i;
+               ciq[i].q = &vm_pageout_queue_internal;
+               ciq[i].current_chead = NULL;
+               ciq[i].scratch_buf = (char *)(buf + i * bufsize);
+
+               result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal,
+                   (void *)&ciq[i], BASEPRI_VM,
+                   &vm_pageout_state.vm_pageout_internal_iothread);
+
+               if (result == KERN_SUCCESS) {
+                       thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
+               } else {
+                       break;
+               }
+       }
        return result;
 }
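
To make the sizing above concrete, a hedged example: on a non-embedded machine reporting max_cpus = 8 and with no overriding boot-args, vm_compressor_thread_count starts at 2, pgo_maxlaundry becomes (2 * 4) * VM_PAGE_LAUNDRY_MAX, and the single scratch allocation of 2 * COMPRESSOR_SCRATCH_BUF_SIZE bytes is carved up per thread exactly as the loop does:

        /* illustrative layout for vm_compressor_thread_count == 2 */
        ciq[0].scratch_buf = (char *)(buf + 0 * bufsize);  /* [buf, buf + bufsize)             */
        ciq[1].scratch_buf = (char *)(buf + 1 * bufsize);  /* [buf + bufsize, buf + 2*bufsize) */
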
 
-#define UPL_DELAYED_UNLOCK_LIMIT  (MAX_UPL_TRANSFER / 2)
+#if CONFIG_IOSCHED
+/*
+ * To support I/O Expedite for compressed files we mark the upls with special flags.
+ * The way decmpfs works is that we create a big upl which marks all the pages needed to
+ * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
+ * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
+ * being held in the big original UPL. We mark each of these smaller UPLs with the flag
+ * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
+ * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
+ * by the req upl lock (the reverse link doesn't need synchronization since we never inspect this link
+ * unless the real I/O upl is being destroyed).
+ */
+
+
+static void
+upl_set_decmp_info(upl_t upl, upl_t src_upl)
+{
+       assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+
+       upl_lock(src_upl);
+       if (src_upl->decmp_io_upl) {
+               /*
+                * If there is already an alive real I/O UPL, ignore this new UPL.
+                * This case should rarely happen and even if it does, it just means
+                * that we might issue a spurious expedite which the driver is expected
+                * to handle.
+                */
+               upl_unlock(src_upl);
+               return;
+       }
+       src_upl->decmp_io_upl = (void *)upl;
+       src_upl->ref_count++;
+
+       upl->flags |= UPL_DECMP_REAL_IO;
+       upl->decmp_io_upl = (void *)src_upl;
+       upl_unlock(src_upl);
+}
+#endif /* CONFIG_IOSCHED */
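
A consolidated, user-space sketch of the linking protocol described in the comment above may help: the request UPL owns the forward link and an extra reference, both manipulated under its lock, while the real-I/O UPL clears that link and drops the reference when it is torn down (the path upl_destroy() takes further down in this diff). Everything below — fake_upl, link_real_io_upl, unlink_real_io_upl — is an illustrative stand-in built on a plain pthread mutex, not kernel API:

/*
 * Stand-alone model of the UPL_DECMP_REQ / UPL_DECMP_REAL_IO linkage.
 * All names below are illustrative stand-ins, not kernel symbols.
 */
#include <pthread.h>

struct fake_upl {
	pthread_mutex_t  lock;
	int              ref_count;
	struct fake_upl *decmp_io_upl;  /* req UPL -> live real-I/O UPL (and back) */
};

/* Mirrors upl_set_decmp_info(): link under the request UPL's lock. */
static void
link_real_io_upl(struct fake_upl *req, struct fake_upl *real_io)
{
	pthread_mutex_lock(&req->lock);
	if (req->decmp_io_upl != NULL) {
		/* an earlier real-I/O UPL is still alive; ignore this one */
		pthread_mutex_unlock(&req->lock);
		return;
	}
	req->decmp_io_upl = real_io;
	req->ref_count++;               /* the real-I/O UPL holds a ref on the request */
	real_io->decmp_io_upl = req;    /* reverse link, only read at destroy time */
	pthread_mutex_unlock(&req->lock);
}

/* Mirrors the UPL_DECMP_REAL_IO teardown path in upl_destroy(). */
static void
unlink_real_io_upl(struct fake_upl *real_io)
{
	struct fake_upl *req = real_io->decmp_io_upl;

	pthread_mutex_lock(&req->lock);
	req->decmp_io_upl = NULL;       /* forward link cleared under the req lock */
	if (--req->ref_count == 0) {
		/* last reference: this is where the request UPL would be destroyed */
	}
	pthread_mutex_unlock(&req->lock);
}

static struct fake_upl req_upl = { .lock = PTHREAD_MUTEX_INITIALIZER, .ref_count = 1 };
static struct fake_upl io_upl  = { .lock = PTHREAD_MUTEX_INITIALIZER, .ref_count = 1 };

int
main(void)
{
	link_real_io_upl(&req_upl, &io_upl);    /* decmpfs issues a smaller real I/O */
	unlink_real_io_upl(&io_upl);            /* that real-I/O UPL is being torn down */
	return 0;
}
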
+
+#if UPL_DEBUG
+int     upl_debug_enabled = 1;
+#else
+int     upl_debug_enabled = 0;
+#endif
 
 static upl_t
 upl_create(int type, int flags, upl_size_t size)
 {
-       upl_t   upl;
-       int     page_field_size = 0;
-       int     upl_flags = 0;
-       int     upl_size  = sizeof(struct upl);
+       upl_t   upl;
+       vm_size_t       page_field_size = 0;
+       int     upl_flags = 0;
+       vm_size_t       upl_size  = sizeof(struct upl);
+
+       assert(page_aligned(size));
+
+       size = round_page_32(size);
 
        if (type & UPL_CREATE_LITE) {
-               page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+               page_field_size = (atop(size) + 7) >> 3;
                page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
 
                upl_flags |= UPL_LITE;
        }
        if (type & UPL_CREATE_INTERNAL) {
-               upl_size += sizeof(struct upl_page_info) * (size/PAGE_SIZE);
+               upl_size += sizeof(struct upl_page_info) * atop(size);
 
                upl_flags |= UPL_INTERNAL;
        }
        upl = (upl_t)kalloc(upl_size + page_field_size);
 
-       if (page_field_size)
-               bzero((char *)upl + upl_size, page_field_size);
+       if (page_field_size) {
+               bzero((char *)upl + upl_size, page_field_size);
+       }
 
        upl->flags = upl_flags | flags;
-       upl->src_object = NULL;
        upl->kaddr = (vm_offset_t)0;
-       upl->size = 0;
+       upl->u_offset = 0;
+       upl->u_size = 0;
        upl->map_object = NULL;
        upl->ref_count = 1;
+       upl->ext_ref_count = 0;
        upl->highest_page = 0;
        upl_lock_init(upl);
-#ifdef UPL_DEBUG
+       upl->vector_upl = NULL;
+       upl->associated_upl = NULL;
+       upl->upl_iodone = NULL;
+#if CONFIG_IOSCHED
+       if (type & UPL_CREATE_IO_TRACKING) {
+               upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
+       }
+
+       upl->upl_reprio_info = 0;
+       upl->decmp_io_upl = 0;
+       if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
+               /* Only support expedite on internal UPLs */
+               thread_t        curthread = current_thread();
+               upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
+               bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
+               upl->flags |= UPL_EXPEDITE_SUPPORTED;
+               if (curthread->decmp_upl != NULL) {
+                       upl_set_decmp_info(upl, curthread->decmp_upl);
+               }
+       }
+#endif
+#if CONFIG_IOSCHED || UPL_DEBUG
+       if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
+               upl->upl_creator = current_thread();
+               upl->uplq.next = 0;
+               upl->uplq.prev = 0;
+               upl->flags |= UPL_TRACKED_BY_OBJECT;
+       }
+#endif
+
+#if UPL_DEBUG
        upl->ubc_alias1 = 0;
        upl->ubc_alias2 = 0;
+
+       upl->upl_state = 0;
+       upl->upl_commit_index = 0;
+       bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
+
+       (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
 #endif /* UPL_DEBUG */
-       return(upl);
+
+       return upl;
 }
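
upl_create() sizes its single allocation from three pieces: the upl header, an optional array of one upl_page_info record per page for internal UPLs, and, for lite UPLs, a bitmap of one bit per page rounded up to whole bytes and then to a 4-byte boundary. A small stand-alone sketch of that arithmetic, with stand-in values for the page size and the record size:

/*
 * Stand-alone sketch of upl_create()'s allocation sizing. The page size
 * and record size below are stand-ins, not kernel values.
 */
#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	size_t page_size = 4096;        /* stand-in for PAGE_SIZE */
	size_t info_size = 16;          /* stand-in for sizeof(struct upl_page_info) */
	size_t size  = 13 * page_size;  /* a 13-page request */
	size_t pages = size / page_size;

	/* UPL_CREATE_LITE: bitmap with one bit per page */
	size_t page_field_size = (pages + 7) >> 3;              /* 13 bits -> 2 bytes */
	page_field_size = (page_field_size + 3) & ~(size_t)3;   /* round up to 4 bytes */

	/* UPL_CREATE_INTERNAL: page-info array appended after the upl header */
	size_t page_info_bytes = pages * info_size;

	printf("pages=%zu bitmap=%zu bytes page_info=%zu bytes\n",
	    pages, page_field_size, page_info_bytes);
	return 0;
}
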
 
 static void
 upl_destroy(upl_t upl)
 {
-       int     page_field_size;  /* bit field in word size buf */
-        int    size;
+       int     page_field_size;  /* bit field in word size buf */
+       int     size;
 
-#ifdef UPL_DEBUG
-       {
-               vm_object_t     object;
+//     DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object);
+
+       if (upl->ext_ref_count) {
+               panic("upl(%p) ext_ref_count", upl);
+       }
+
+#if CONFIG_IOSCHED
+       if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
+               upl_t src_upl;
+               src_upl = upl->decmp_io_upl;
+               assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+               upl_lock(src_upl);
+               src_upl->decmp_io_upl = NULL;
+               upl_unlock(src_upl);
+               upl_deallocate(src_upl);
+       }
+#endif /* CONFIG_IOSCHED */
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+       if (((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) &&
+           !(upl->flags & UPL_VECTOR)) {
+               vm_object_t     object;
 
                if (upl->flags & UPL_SHADOWED) {
                        object = upl->map_object->shadow;
                } else {
                        object = upl->map_object;
                }
+
                vm_object_lock(object);
                queue_remove(&object->uplq, upl, upl_t, uplq);
+               vm_object_activity_end(object);
+               vm_object_collapse(object, 0, TRUE);
                vm_object_unlock(object);
        }
-#endif /* UPL_DEBUG */
+#endif
        /*
         * drop a reference on the map_object whether or
         * not a pageout object is inserted
         */
-       if (upl->flags & UPL_SHADOWED)
+       if (upl->flags & UPL_SHADOWED) {
                vm_object_deallocate(upl->map_object);
+       }
 
-        if (upl->flags & UPL_DEVICE_MEMORY)
-               size = PAGE_SIZE;
-       else
-               size = upl->size;
+       if (upl->flags & UPL_DEVICE_MEMORY) {
+               size = PAGE_SIZE;
+       } else {
+               size = upl_adjusted_size(upl, PAGE_MASK);
+       }
        page_field_size = 0;
 
        if (upl->flags & UPL_LITE) {
-               page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+               page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
                page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
        }
+       upl_lock_destroy(upl);
+       upl->vector_upl = (vector_upl_t) 0xfeedbeef;
+
+#if CONFIG_IOSCHED
+       if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
+               kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
+       }
+#endif
+
        if (upl->flags & UPL_INTERNAL) {
                kfree(upl,
-                     sizeof(struct upl) + 
-                     (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
-                     + page_field_size);
+                   sizeof(struct upl) +
+                   (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
+                   + page_field_size);
        } else {
                kfree(upl, sizeof(struct upl) + page_field_size);
        }
 }
 
-void uc_upl_dealloc(upl_t upl);
-__private_extern__ void
-uc_upl_dealloc(upl_t upl)
+void
+upl_deallocate(upl_t upl)
 {
-       if (--upl->ref_count == 0)
+       upl_lock(upl);
+
+       if (--upl->ref_count == 0) {
+               if (vector_upl_is_valid(upl)) {
+                       vector_upl_deallocate(upl);
+               }
+               upl_unlock(upl);
+
+               if (upl->upl_iodone) {
+                       upl_callout_iodone(upl);
+               }
+
                upl_destroy(upl);
+       } else {
+               upl_unlock(upl);
+       }
 }
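
One detail of upl_deallocate() worth calling out: the reference count is dropped under the UPL lock, but the lock is released before the iodone callout runs and before upl_destroy() tears the UPL down. A tiny stand-alone model of that ordering (all names are local stand-ins, not kernel symbols):

/*
 * Stand-alone model of the teardown ordering above: drop the reference
 * under the lock, unlock, run the completion callout, then destroy.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_upl {
	pthread_mutex_t lock;
	int             ref_count;
	void          (*iodone)(struct fake_upl *);
};

static void
fake_upl_deallocate(struct fake_upl *u)
{
	pthread_mutex_lock(&u->lock);
	if (--u->ref_count == 0) {
		pthread_mutex_unlock(&u->lock);     /* never destroy with the lock held */
		if (u->iodone) {
			u->iodone(u);               /* completion callout first... */
		}
		printf("destroying UPL\n");         /* ...then the upl_destroy() step */
	} else {
		pthread_mutex_unlock(&u->lock);
	}
}

static void
say_done(struct fake_upl *u)
{
	(void)u;
	printf("iodone callout\n");
}

static struct fake_upl one = {
	.lock = PTHREAD_MUTEX_INITIALIZER, .ref_count = 1, .iodone = say_done,
};

int
main(void)
{
	fake_upl_deallocate(&one);
	return 0;
}
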
 
+#if CONFIG_IOSCHED
 void
-upl_deallocate(upl_t upl)
+upl_mark_decmp(upl_t upl)
 {
-       if (--upl->ref_count == 0)
-               upl_destroy(upl);
+       if (upl->flags & UPL_TRACKED_BY_OBJECT) {
+               upl->flags |= UPL_DECMP_REQ;
+               upl->upl_creator->decmp_upl = (void *)upl;
+       }
+}
+
+void
+upl_unmark_decmp(upl_t upl)
+{
+       if (upl && (upl->flags & UPL_DECMP_REQ)) {
+               upl->upl_creator->decmp_upl = NULL;
+       }
+}
+
+#endif /* CONFIG_IOSCHED */
+
+#define VM_PAGE_Q_BACKING_UP(q)         \
+       ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
+
+boolean_t must_throttle_writes(void);
+
+boolean_t
+must_throttle_writes()
+{
+       if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
+           vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
+               return TRUE;
+       }
+
+       return FALSE;
+}
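
must_throttle_writes() returns TRUE only when both conditions hold: the external pageout queue's laundry is at or above 80% of its pgo_maxlaundry (the VM_PAGE_Q_BACKING_UP test) and externally-backed pageable pages exceed 60% of AVAILABLE_NON_COMPRESSED_MEMORY. A stand-alone sketch of the test with illustrative numbers:

/*
 * Stand-alone sketch of the must_throttle_writes() test above, using
 * illustrative numbers rather than kernel state.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
queue_backing_up(unsigned laundry, unsigned maxlaundry)
{
	return laundry >= (maxlaundry * 8) / 10;        /* VM_PAGE_Q_BACKING_UP */
}

static bool
throttle_writes(unsigned laundry, unsigned maxlaundry,
    unsigned external_pages, unsigned avail_non_compressed)
{
	return queue_backing_up(laundry, maxlaundry) &&
	       external_pages > (avail_non_compressed * 6) / 10;
}

int
main(void)
{
	/* 900 of 1000 laundry slots used, 70k of 100k available pages are external */
	printf("%d\n", throttle_writes(900, 1000, 70000, 100000));  /* prints 1 */
	/* backlog below 80%: no throttling even with many external pages */
	printf("%d\n", throttle_writes(700, 1000, 70000, 100000));  /* prints 0 */
	return 0;
}
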
+
+#define MIN_DELAYED_WORK_CTX_ALLOCATED  (16)
+#define MAX_DELAYED_WORK_CTX_ALLOCATED  (512)
+
+int vm_page_delayed_work_ctx_needed = 0;
+zone_t  dw_ctx_zone = ZONE_NULL;
+
+void
+vm_page_delayed_work_init_ctx(void)
+{
+       int nelems = 0, elem_size = 0;
+
+       elem_size = sizeof(struct vm_page_delayed_work_ctx);
+
+       dw_ctx_zone = zone_create_ext("delayed-work-ctx", elem_size,
+           ZC_NOGC, ZONE_ID_ANY, ^(zone_t z) {
+               zone_set_exhaustible(z, MAX_DELAYED_WORK_CTX_ALLOCATED * elem_size);
+       });
+
+       nelems = zfill(dw_ctx_zone, MIN_DELAYED_WORK_CTX_ALLOCATED);
+       if (nelems < MIN_DELAYED_WORK_CTX_ALLOCATED) {
+               printf("vm_page_delayed_work_init_ctx: Failed to preallocate minimum delayed work contexts (%d vs %d).\n", nelems, MIN_DELAYED_WORK_CTX_ALLOCATED);
+#if DEVELOPMENT || DEBUG
+               panic("Failed to preallocate minimum delayed work contexts (%d vs %d).\n", nelems, MIN_DELAYED_WORK_CTX_ALLOCATED);
+#endif /* DEVELOPMENT || DEBUG */
+       }
+}
+
+struct vm_page_delayed_work*
+vm_page_delayed_work_get_ctx(void)
+{
+       struct vm_page_delayed_work_ctx * dw_ctx = NULL;
+
+       dw_ctx = (struct vm_page_delayed_work_ctx*) zalloc_noblock(dw_ctx_zone);
+
+       if (dw_ctx) {
+               dw_ctx->delayed_owner = current_thread();
+       } else {
+               vm_page_delayed_work_ctx_needed++;
+       }
+       return dw_ctx ? dw_ctx->dwp : NULL;
+}
+
+void
+vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp)
+{
+       struct  vm_page_delayed_work_ctx *ldw_ctx;
+
+       ldw_ctx = (struct vm_page_delayed_work_ctx *)dwp;
+       ldw_ctx->delayed_owner = NULL;
+
+       zfree(dw_ctx_zone, ldw_ctx);
 }
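
The three routines above hand callers a batch of delayed-work slots drawn from an exhaustible zone; when zalloc_noblock() fails, callers (see the dwp_start / dwp_finish_ctx handling in vm_object_upl_request() further down) fall back to a single on-stack slot and skip the free. A user-space sketch of that calling pattern, with malloc standing in for the zone allocator and made-up names throughout:

/*
 * Stand-alone model of the delayed-work-context calling pattern.
 * fake_get_ctx()/fake_finish_ctx() stand in for the zone-backed calls.
 */
#include <stdbool.h>
#include <stdlib.h>

#define DELAYED_WORK_LIMIT 64           /* stand-in batch size */

struct fake_delayed_work { int dw_mask; };

static struct fake_delayed_work *
fake_get_ctx(void)                      /* may fail, like zalloc_noblock() */
{
	return malloc(sizeof(struct fake_delayed_work) * DELAYED_WORK_LIMIT);
}

static void
fake_finish_ctx(struct fake_delayed_work *dwp)
{
	free(dwp);
}

int
main(void)
{
	struct fake_delayed_work dw_array;      /* fallback: a single slot */
	struct fake_delayed_work *dwp_start;
	bool dwp_finish_ctx = true;
	int dw_limit = DELAYED_WORK_LIMIT;

	dwp_start = fake_get_ctx();
	if (dwp_start == NULL) {
		dwp_start = &dw_array;
		dw_limit = 1;
		dwp_finish_ctx = false;
	}

	/* ... queue up to dw_limit pieces of per-page work in dwp_start ... */

	if (dwp_finish_ctx) {
		fake_finish_ctx(dwp_start);     /* return the batch to its pool */
	}
	return 0;
}
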
 
 /*
- * Statistics about UPL enforcement of copy-on-write obligations.
- */
-unsigned long upl_cow = 0;
-unsigned long upl_cow_again = 0;
-unsigned long upl_cow_contiguous = 0;
-unsigned long upl_cow_pages = 0;
-unsigned long upl_cow_again_pages = 0;
-unsigned long upl_cow_contiguous_pages = 0;
-
-/*  
- *     Routine:        vm_object_upl_request 
- *     Purpose:        
+ *     Routine:        vm_object_upl_request
+ *     Purpose:
  *             Cause the population of a portion of a vm_object.
  *             Depending on the nature of the request, the pages
 *             returned may contain valid data or be uninitialized.
@@ -2555,7 +5401,7 @@ unsigned long upl_cow_contiguous_pages = 0;
  *             IMPORTANT NOTE: The caller must still respect the relationship
  *             between the vm_object and its backing memory object.  The
  *             caller MUST NOT substitute changes in the backing file
- *             without first doing a memory_object_lock_request on the 
+ *             without first doing a memory_object_lock_request on the
 *             target range unless it is known that the pages are not
  *             shared with another entity at the pager level.
  *             Copy_in_to:
@@ -2573,7 +5419,7 @@ unsigned long upl_cow_contiguous_pages = 0;
  *                     all mapped pages.  Where a page does not exist
  *                     map a zero filled one. Leave pages busy in
  *                     the original object.  If a page list structure
- *                     is not specified, this call is a no-op. 
+ *                     is not specified, this call is a no-op.
  *
  *             Note:  access of default pager objects has a rather interesting
  *             twist.  The caller of this routine, presumably the file system
@@ -2581,37 +5427,50 @@ unsigned long upl_cow_contiguous_pages = 0;
  *             against a default pager backed object.  Only the default
  *             pager will make requests on backing store related vm_objects
  *             In this way the default pager can maintain the relationship
- *             between backing store files (abstract memory objects) and 
+ *             between backing store files (abstract memory objects) and
 *             the vm_objects (cache objects) they support.
  *
  */
 
 __private_extern__ kern_return_t
 vm_object_upl_request(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       upl_size_t              size,
-       upl_t                   *upl_ptr,
-       upl_page_info_array_t   user_page_list,
-       unsigned int            *page_list_count,
-       int                     cntrl_flags)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       upl_size_t              size,
+       upl_t                   *upl_ptr,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            *page_list_count,
+       upl_control_flags_t     cntrl_flags,
+       vm_tag_t                tag)
 {
-       vm_page_t               dst_page = VM_PAGE_NULL;
-       vm_object_offset_t      dst_offset;
-       upl_size_t              xfer_size;
-       boolean_t               dirty;
-       boolean_t               hw_dirty;
-       upl_t                   upl = NULL;
-       unsigned int            entry;
-#if MACH_CLUSTER_STATS
-       boolean_t               encountered_lrp = FALSE;
-#endif
-       vm_page_t               alias_page = NULL;
-        int                    refmod_state = 0;
-       wpl_array_t             lite_list = NULL;
-       vm_object_t             last_copy_object;
-       int                     delayed_unlock = 0;
-       int                     j;
+       vm_page_t               dst_page = VM_PAGE_NULL;
+       vm_object_offset_t      dst_offset;
+       upl_size_t              xfer_size;
+       unsigned int            size_in_pages;
+       boolean_t               dirty;
+       boolean_t               hw_dirty;
+       upl_t                   upl = NULL;
+       unsigned int            entry;
+       vm_page_t               alias_page = NULL;
+       int                     refmod_state = 0;
+       wpl_array_t             lite_list = NULL;
+       vm_object_t             last_copy_object;
+       struct  vm_page_delayed_work    dw_array;
+       struct  vm_page_delayed_work    *dwp, *dwp_start;
+       bool                    dwp_finish_ctx = TRUE;
+       int                     dw_count;
+       int                     dw_limit;
+       int                     io_tracking_flag = 0;
+       int                     grab_options;
+       int                     page_grab_count = 0;
+       ppnum_t                 phys_page;
+       pmap_flush_context      pmap_flush_context_storage;
+       boolean_t               pmap_flushes_delayed = FALSE;
+#if DEVELOPMENT || DEBUG
+       task_t                  task = current_task();
+#endif /* DEVELOPMENT || DEBUG */
+
+       dwp_start = dwp = NULL;
 
        if (cntrl_flags & ~UPL_VALID_FLAGS) {
                /*
@@ -2620,51 +5479,91 @@ vm_object_upl_request(
                 */
                return KERN_INVALID_VALUE;
        }
-       if ( (!object->internal) && (object->paging_offset != 0) )
+       if ((!object->internal) && (object->paging_offset != 0)) {
                panic("vm_object_upl_request: external object with non-zero paging offset\n");
-       if (object->phys_contiguous)
-               panic("vm_object_upl_request: contiguous object specified\n");
+       }
+       if (object->phys_contiguous) {
+               panic("vm_object_upl_request: contiguous object specified\n");
+       }
+
+       assertf(page_aligned(offset) && page_aligned(size),
+           "offset 0x%llx size 0x%x",
+           offset, size);
 
+       VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);
 
-       if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
-               size = MAX_UPL_SIZE * PAGE_SIZE;
+       dw_count = 0;
+       dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+       dwp_start = vm_page_delayed_work_get_ctx();
+       if (dwp_start == NULL) {
+               dwp_start = &dw_array;
+               dw_limit = 1;
+               dwp_finish_ctx = FALSE;
+       }
 
-       if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
-               *page_list_count = MAX_UPL_SIZE;
+       dwp = dwp_start;
 
-       if (cntrl_flags & UPL_SET_INTERNAL) {
-               if (cntrl_flags & UPL_SET_LITE) {
+       if (size > MAX_UPL_SIZE_BYTES) {
+               size = MAX_UPL_SIZE_BYTES;
+       }
+
+       if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
+               *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
+       }
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+       if (object->io_tracking || upl_debug_enabled) {
+               io_tracking_flag |= UPL_CREATE_IO_TRACKING;
+       }
+#endif
+#if CONFIG_IOSCHED
+       if (object->io_tracking) {
+               io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
+       }
+#endif
 
-                       upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
+       if (cntrl_flags & UPL_SET_INTERNAL) {
+               if (cntrl_flags & UPL_SET_LITE) {
+                       upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
 
                        user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
                        lite_list = (wpl_array_t)
-                                       (((uintptr_t)user_page_list) + 
-                                       ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+                           (((uintptr_t)user_page_list) +
+                           ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
+                       if (size == 0) {
+                               user_page_list = NULL;
+                               lite_list = NULL;
+                       }
                } else {
-                       upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
+                       upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
 
                        user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+                       if (size == 0) {
+                               user_page_list = NULL;
+                       }
                }
        } else {
-               if (cntrl_flags & UPL_SET_LITE) {
-
-                       upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
+               if (cntrl_flags & UPL_SET_LITE) {
+                       upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
 
                        lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+                       if (size == 0) {
+                               lite_list = NULL;
+                       }
                } else {
-                       upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
+                       upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
                }
        }
        *upl_ptr = upl;
-       
-       if (user_page_list)
-               user_page_list[0].device = FALSE;
+
+       if (user_page_list) {
+               user_page_list[0].device = FALSE;
+       }
 
        if (cntrl_flags & UPL_SET_LITE) {
-               upl->map_object = object;
+               upl->map_object = object;
        } else {
-               upl->map_object = vm_object_allocate(size);
+               upl->map_object = vm_object_allocate(size);
                /*
                 * No need to lock the new object: nobody else knows
                 * about it yet, so it's all ours so far.
@@ -2673,39 +5572,42 @@ vm_object_upl_request(
                upl->map_object->pageout = TRUE;
                upl->map_object->can_persist = FALSE;
                upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
-               upl->map_object->shadow_offset = offset;
+               upl->map_object->vo_shadow_offset = offset;
                upl->map_object->wimg_bits = object->wimg_bits;
+               assertf(page_aligned(upl->map_object->vo_shadow_offset),
+                   "object %p shadow_offset 0x%llx",
+                   upl->map_object, upl->map_object->vo_shadow_offset);
 
                VM_PAGE_GRAB_FICTITIOUS(alias_page);
 
                upl->flags |= UPL_SHADOWED;
        }
-       /*
-        * ENCRYPTED SWAP:
-        * Just mark the UPL as "encrypted" here.
-        * We'll actually encrypt the pages later,
-        * in upl_encrypt(), when the caller has
-        * selected which pages need to go to swap.
-        */
-       if (cntrl_flags & UPL_ENCRYPT)
-               upl->flags |= UPL_ENCRYPTED;
-
-       if (cntrl_flags & UPL_FOR_PAGEOUT)
+       if (cntrl_flags & UPL_FOR_PAGEOUT) {
                upl->flags |= UPL_PAGEOUT;
+       }
 
        vm_object_lock(object);
-       vm_object_paging_begin(object);
+       vm_object_activity_begin(object);
+
+       grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+       if (object->can_grab_secluded) {
+               grab_options |= VM_PAGE_GRAB_SECLUDED;
+       }
+#endif /* CONFIG_SECLUDED_MEMORY */
 
        /*
         * we can lock in the paging_offset once paging_in_progress is set
         */
-       upl->size = size;
-       upl->offset = offset + object->paging_offset;
-
-#ifdef UPL_DEBUG
-       queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UPL_DEBUG */
+       upl->u_size = size;
+       upl->u_offset = offset + object->paging_offset;
 
+#if CONFIG_IOSCHED || UPL_DEBUG
+       if (object->io_tracking || upl_debug_enabled) {
+               vm_object_activity_begin(object);
+               queue_enter(&object->uplq, upl, upl_t, uplq);
+       }
+#endif
        if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
                /*
                 * Honor copy-on-write obligations
@@ -2717,15 +5619,16 @@ vm_object_upl_request(
                 * the caller modify them.
                 */
                vm_object_update(object,
-                                offset,
-                                size,
-                                NULL,
-                                NULL,
-                                FALSE, /* should_return */
-                                MEMORY_OBJECT_COPY_SYNC,
-                                VM_PROT_NO_CHANGE);
-               upl_cow++;
-               upl_cow_pages += size >> PAGE_SHIFT;
+                   offset,
+                   size,
+                   NULL,
+                   NULL,
+                   FALSE,              /* should_return */
+                   MEMORY_OBJECT_COPY_SYNC,
+                   VM_PROT_NO_CHANGE);
+
+               VM_PAGEOUT_DEBUG(upl_cow, 1);
+               VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
        }
        /*
         * remember which copy object we synchronized with
@@ -2735,58 +5638,60 @@ vm_object_upl_request(
 
        xfer_size = size;
        dst_offset = offset;
+       size_in_pages = size / PAGE_SIZE;
+
+       if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
+           object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
+               object->scan_collisions = 0;
+       }
+
+       if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
+               boolean_t       isSSD = FALSE;
+
+#if CONFIG_EMBEDDED
+               isSSD = TRUE;
+#else
+               vnode_pager_get_isSSD(object->pager, &isSSD);
+#endif
+               vm_object_unlock(object);
+
+               OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
+
+               if (isSSD == TRUE) {
+                       delay(1000 * size_in_pages);
+               } else {
+                       delay(5000 * size_in_pages);
+               }
+               OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
+
+               vm_object_lock(object);
+       }
 
        while (xfer_size) {
+               dwp->dw_mask = 0;
 
                if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
-                       if (delayed_unlock) {
-                               delayed_unlock = 0;
-                               vm_page_unlock_queues();
-                       }
                        vm_object_unlock(object);
                        VM_PAGE_GRAB_FICTITIOUS(alias_page);
-                       goto relock;
-               }
-               if (delayed_unlock == 0) {
-                       /*
-                        * pageout_scan takes the vm_page_lock_queues first
-                        * then tries for the object lock... to avoid what
-                        * is effectively a lock inversion, we'll go to the
-                        * trouble of taking them in that same order... otherwise
-                        * if this object contains the majority of the pages resident
-                        * in the UBC (or a small set of large objects actively being
-                        * worked on contain the majority of the pages), we could
-                        * cause the pageout_scan thread to 'starve' in its attempt
-                        * to find pages to move to the free queue, since it has to
-                        * successfully acquire the object lock of any candidate page
-                        * before it can steal/clean it.
-                        */
-                       vm_object_unlock(object);
-relock:
-                       for (j = 0; ; j++) {
-                               vm_page_lock_queues();
-
-                               if (vm_object_lock_try(object))
-                                       break;
-                               vm_page_unlock_queues();
-                               mutex_pause(j);
-                       }
-                       delayed_unlock = 1;
+                       vm_object_lock(object);
                }
                if (cntrl_flags & UPL_COPYOUT_FROM) {
-                       upl->flags |= UPL_PAGE_SYNC_DONE;
-
-                       if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
-                               dst_page->fictitious ||
-                               dst_page->absent ||
-                               dst_page->error ||
-                              (dst_page->wire_count && !dst_page->pageout && !dst_page->list_req_pending)) {
-
-                               if (user_page_list)
+                       upl->flags |= UPL_PAGE_SYNC_DONE;
+
+                       if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
+                           dst_page->vmp_fictitious ||
+                           dst_page->vmp_absent ||
+                           dst_page->vmp_error ||
+                           dst_page->vmp_cleaning ||
+                           (VM_PAGE_WIRED(dst_page))) {
+                               if (user_page_list) {
                                        user_page_list[entry].phys_addr = 0;
+                               }
 
-                               goto delay_unlock_queues;
+                               goto try_next_page;
                        }
+                       phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
                        /*
                         * grab this up front...
                         * a high percentage of the time we're going to
@@ -2794,228 +5699,167 @@ relock:
                         * anyway... so we can eliminate an extra call into
                         * the pmap layer by grabbing it here and recording it
                         */
-                       if (dst_page->pmapped)
-                               refmod_state = pmap_get_refmod(dst_page->phys_page);
-                       else
-                               refmod_state = 0;
+                       if (dst_page->vmp_pmapped) {
+                               refmod_state = pmap_get_refmod(phys_page);
+                       } else {
+                               refmod_state = 0;
+                       }
 
-                       if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
-                               /*
+                       if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
+                               /*
                                 * page is on inactive list and referenced...
                                 * reactivate it now... this gets it out of the
                                 * way of vm_pageout_scan which would have to
                                 * reactivate it upon tripping over it
                                 */
-                               vm_page_activate(dst_page);
-                               VM_STAT_INCR(reactivations);
+                               dwp->dw_mask |= DW_vm_page_activate;
                        }
                        if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
-                               /*
+                               /*
                                 * we're only asking for DIRTY pages to be returned
                                 */
-                               if (dst_page->list_req_pending || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
-                                       /*
+                               if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
+                                       /*
                                         * if we were the page stolen by vm_pageout_scan to be
-                                        * cleaned (as opposed to a buddy being clustered in 
+                                        * cleaned (as opposed to a buddy being clustered in)
                                          * or this request is not being driven by a PAGEOUT cluster,
                                         * then we only need to check for the page being dirty or
                                         * precious to decide whether to return it
                                         */
-                                       if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
-                                               goto check_busy;
+                                       if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
+                                               goto check_busy;
+                                       }
                                        goto dont_return;
                                }
                                /*
                                 * this is a request for a PAGEOUT cluster and this page
                                 * is merely along for the ride as a 'buddy'... not only
                                 * does it have to be dirty to be returned, but it also
-                                * can't have been referenced recently... note that we've
-                                * already filtered above based on whether this page is
-                                * currently on the inactive queue or it meets the page
-                                * ticket (generation count) check
+                                * can't have been referenced recently...
                                 */
-                               if ( !(refmod_state & VM_MEM_REFERENCED) && 
-                                    ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
-                                       goto check_busy;
+                               if ((hibernate_cleaning_in_progress == TRUE ||
+                                   (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
+                                   (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
+                                   ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
+                                       goto check_busy;
                                }
 dont_return:
                                /*
                                 * if we reach here, we're not to return
                                 * the page... go on to the next one
                                 */
-                               if (user_page_list)
-                                       user_page_list[entry].phys_addr = 0;
+                               if (dst_page->vmp_laundry == TRUE) {
+                                       /*
+                                        * if we get here, the page is not 'cleaning' (filtered out above).
+                                        * since it has been referenced, remove it from the laundry
+                                        * so we don't pay the cost of an I/O to clean a page
+                                        * we're just going to take back
+                                        */
+                                       vm_page_lockspin_queues();
+
+                                       vm_pageout_steal_laundry(dst_page, TRUE);
+                                       vm_page_activate(dst_page);
+
+                                       vm_page_unlock_queues();
+                               }
+                               if (user_page_list) {
+                                       user_page_list[entry].phys_addr = 0;
+                               }
 
-                               goto delay_unlock_queues;
+                               goto try_next_page;
                        }
-check_busy:                    
-                       if (dst_page->busy && (!(dst_page->list_req_pending && dst_page->pageout))) {
-                               if (cntrl_flags & UPL_NOBLOCK) {
-                                       if (user_page_list)
-                                               user_page_list[entry].phys_addr = 0;
+check_busy:
+                       if (dst_page->vmp_busy) {
+                               if (cntrl_flags & UPL_NOBLOCK) {
+                                       if (user_page_list) {
+                                               user_page_list[entry].phys_addr = 0;
+                                       }
+                                       dwp->dw_mask = 0;
 
-                                       goto delay_unlock_queues;
+                                       goto try_next_page;
                                }
                                /*
                                 * someone else is playing with the
                                 * page.  We will have to wait.
                                 */
-                               delayed_unlock = 0;
-                               vm_page_unlock_queues();
-
                                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
 
                                continue;
                        }
-                       /*
-                        * Someone else already cleaning the page?
-                        */
-                       if ((dst_page->cleaning || dst_page->absent || dst_page->wire_count != 0) && !dst_page->list_req_pending) {
-                               if (user_page_list)
-                                       user_page_list[entry].phys_addr = 0;
-
-                               goto delay_unlock_queues;
-                       }
-                       /*
-                        * ENCRYPTED SWAP:
-                        * The caller is gathering this page and might
-                        * access its contents later on.  Decrypt the
-                        * page before adding it to the UPL, so that
-                        * the caller never sees encrypted data.
-                        */
-                       if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
-                               int  was_busy;
+                       if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
+                               vm_page_lockspin_queues();
 
-                               delayed_unlock = 0;
+                               if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
+                                       /*
+                                        * we've buddied up a page for a clustered pageout
+                                        * that has already been moved to the pageout
+                                        * queue by pageout_scan... we need to remove
+                                        * it from the queue and drop the laundry count
+                                        * on that queue
+                                        */
+                                       vm_pageout_throttle_up(dst_page);
+                               }
                                vm_page_unlock_queues();
-                               /*
-                                * save the current state of busy
-                                * mark page as busy while decrypt
-                                * is in progress since it will drop
-                                * the object lock...
-                                */
-                               was_busy = dst_page->busy;
-                               dst_page->busy = TRUE;
-
-                               vm_page_decrypt(dst_page, 0);
-                               vm_page_decrypt_for_upl_counter++;
-                               /*
-                                * restore to original busy state
-                                */
-                               dst_page->busy = was_busy;
-
-                               vm_page_lock_queues();
-                               delayed_unlock = 1;
-                       }
-                       if (dst_page->pageout_queue == TRUE)
-                               /*
-                                * we've buddied up a page for a clustered pageout
-                                * that has already been moved to the pageout
-                                * queue by pageout_scan... we need to remove
-                                * it from the queue and drop the laundry count
-                                * on that queue
-                                */
-                               vm_pageout_queue_steal(dst_page);
-#if MACH_CLUSTER_STATS
-                       /*
-                        * pageout statistics gathering.  count
-                        * all the pages we will page out that
-                        * were not counted in the initial
-                        * vm_pageout_scan work
-                        */
-                       if (dst_page->list_req_pending)
-                               encountered_lrp = TRUE;
-                       if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious)) && !dst_page->list_req_pending) {
-                               if (encountered_lrp)
-                                       CLUSTER_STAT(pages_at_higher_offsets++;)
-                               else
-                                       CLUSTER_STAT(pages_at_lower_offsets++;)
                        }
-#endif
-                       /*
-                        * Turn off busy indication on pending
-                        * pageout.  Note: we can only get here
-                        * in the request pending case.
-                        */
-                       dst_page->list_req_pending = FALSE;
-                       dst_page->busy = FALSE;
-
                        hw_dirty = refmod_state & VM_MEM_MODIFIED;
-                       dirty = hw_dirty ? TRUE : dst_page->dirty;
+                       dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
+
+                       if (phys_page > upl->highest_page) {
+                               upl->highest_page = phys_page;
+                       }
 
-                       if (dst_page->phys_page > upl->highest_page)
-                               upl->highest_page = dst_page->phys_page;
+                       assert(!pmap_is_noencrypt(phys_page));
 
                        if (cntrl_flags & UPL_SET_LITE) {
-                               int     pg_num;
+                               unsigned int    pg_num;
 
-                               pg_num = (dst_offset-offset)/PAGE_SIZE;
-                               lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+                               pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
+                               assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
+                               lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
 
-                               if (hw_dirty)
-                                       pmap_clear_modify(dst_page->phys_page);
+                               if (hw_dirty) {
+                                       if (pmap_flushes_delayed == FALSE) {
+                                               pmap_flush_context_init(&pmap_flush_context_storage);
+                                               pmap_flushes_delayed = TRUE;
+                                       }
+                                       pmap_clear_refmod_options(phys_page,
+                                           VM_MEM_MODIFIED,
+                                           PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
+                                           &pmap_flush_context_storage);
+                               }
 
                                /*
-                                * Mark original page as cleaning 
+                                * Mark original page as cleaning
                                 * in place.
                                 */
-                               dst_page->cleaning = TRUE;
-                               dst_page->precious = FALSE;
+                               dst_page->vmp_cleaning = TRUE;
+                               dst_page->vmp_precious = FALSE;
                        } else {
-                               /*
+                               /*
                                 * use pageclean setup, it is more
                                 * convenient even for the pageout
                                 * cases here
                                 */
-                               vm_object_lock(upl->map_object);
+                               vm_object_lock(upl->map_object);
                                vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
                                vm_object_unlock(upl->map_object);
 
-                               alias_page->absent = FALSE;
+                               alias_page->vmp_absent = FALSE;
                                alias_page = NULL;
                        }
-#if     MACH_PAGEMAP
-                       /*
-                        * Record that this page has been 
-                        * written out
-                        */
-                       vm_external_state_set(object->existence_map, dst_page->offset);
-#endif  /*MACH_PAGEMAP*/
-                       dst_page->dirty = dirty;
-
-                       if (!dirty)
-                               dst_page->precious = TRUE;
-
-                       if (dst_page->pageout)
-                               dst_page->busy = TRUE;
-
-                       if ( (cntrl_flags & UPL_ENCRYPT) ) {
-                               /*
-                                * ENCRYPTED SWAP:
-                                * We want to deny access to the target page
-                                * because its contents are about to be
-                                * encrypted and the user would be very
-                                * confused to see encrypted data instead
-                                * of their data.
-                                * We also set "encrypted_cleaning" to allow
-                                * vm_pageout_scan() to demote that page
-                                * from "adjacent/clean-in-place" to
-                                * "target/clean-and-free" if it bumps into
-                                * this page during its scanning while we're
-                                * still processing this cluster.
-                                */
-                               dst_page->busy = TRUE;
-                               dst_page->encrypted_cleaning = TRUE;
+                       if (dirty) {
+                               SET_PAGE_DIRTY(dst_page, FALSE);
+                       } else {
+                               dst_page->vmp_dirty = FALSE;
                        }
-                       if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
-                               /*
-                                * deny access to the target page
-                                * while it is being worked on
-                                */
-                               if ((!dst_page->pageout) && (dst_page->wire_count == 0)) {
-                                       dst_page->busy = TRUE;
-                                       dst_page->pageout = TRUE;
-                                       vm_page_wire(dst_page);
+
+                       if (!dirty) {
+                               dst_page->vmp_precious = TRUE;
+                       }
+
+                       if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
+                               if (!VM_PAGE_WIRED(dst_page)) {
+                                       dst_page->vmp_free_when_done = TRUE;
                                }
                        }
                } else {
@@ -3042,24 +5886,18 @@ check_busy:
                                 * to see both the *before* and *after* pages.
                                 */
                                if (object->copy != VM_OBJECT_NULL) {
-                                       delayed_unlock = 0;
-                                       vm_page_unlock_queues();
-
                                        vm_object_update(
                                                object,
                                                dst_offset,/* current offset */
                                                xfer_size, /* remaining size */
                                                NULL,
                                                NULL,
-                                               FALSE,     /* should_return */
+                                               FALSE,     /* should_return */
                                                MEMORY_OBJECT_COPY_SYNC,
                                                VM_PROT_NO_CHANGE);
 
-                                       upl_cow_again++;
-                                       upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
-
-                                       vm_page_lock_queues();
-                                       delayed_unlock = 1;
+                                       VM_PAGEOUT_DEBUG(upl_cow_again, 1);
+                                       VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
                                }
                                /*
                                 * remember the copy object we synced with
@@ -3067,214 +5905,208 @@ check_busy:
                                last_copy_object = object->copy;
                        }
                        dst_page = vm_page_lookup(object, dst_offset);
-                       
-                       if (dst_page != VM_PAGE_NULL) {
-                               if ( !(dst_page->list_req_pending) ) {
-                                       if ((cntrl_flags & UPL_RET_ONLY_ABSENT) && !dst_page->absent) {
-                                               /*
-                                                * skip over pages already present in the cache
-                                                */
-                                               if (user_page_list)
-                                                       user_page_list[entry].phys_addr = 0;
 
-                                               goto delay_unlock_queues;
+                       if (dst_page != VM_PAGE_NULL) {
+                               if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+                                       /*
+                                        * skip over pages already present in the cache
+                                        */
+                                       if (user_page_list) {
+                                               user_page_list[entry].phys_addr = 0;
                                        }
-                                       if (dst_page->cleaning) {
-                                               /*
-                                                * someone else is writing to the page... wait...
-                                                */
-                                               delayed_unlock = 0;
-                                               vm_page_unlock_queues();
 
-                                               PAGE_SLEEP(object, dst_page, THREAD_UNINT);
-
-                                               continue;
-                                       }
-                               } else {
-                                       if (dst_page->fictitious &&
-                                           dst_page->phys_page == vm_page_fictitious_addr) {
-                                               assert( !dst_page->speculative);
-                                               /*
-                                                * dump the fictitious page
-                                                */
-                                               dst_page->list_req_pending = FALSE;
+                                       goto try_next_page;
+                               }
+                               if (dst_page->vmp_fictitious) {
+                                       panic("need corner case for fictitious page");
+                               }
 
-                                               vm_page_free(dst_page);
+                               if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
+                                       /*
+                                        * someone else is playing with the
+                                        * page.  We will have to wait.
+                                        */
+                                       PAGE_SLEEP(object, dst_page, THREAD_UNINT);
 
-                                               dst_page = NULL;
-                                       } else if (dst_page->absent) {
-                                               /*
-                                                * the default_pager case
-                                                */
-                                               dst_page->list_req_pending = FALSE;
-                                               dst_page->busy = FALSE;
-                                       }
+                                       continue;
                                }
-                       }
-                       if (dst_page == VM_PAGE_NULL) {
+                               if (dst_page->vmp_laundry) {
+                                       vm_pageout_steal_laundry(dst_page, FALSE);
+                               }
+                       } else {
                                if (object->private) {
-                                       /* 
-                                        * This is a nasty wrinkle for users 
-                                        * of upl who encounter device or 
-                                        * private memory however, it is 
+                                       /*
+                                        * This is a nasty wrinkle for users
+                                        * of upl who encounter device or
+                                        * private memory; however, it is
                                         * unavoidable: only a fault can
                                         * resolve the actual backing
                                         * physical page by asking the
                                         * backing device.
                                         */
-                                       if (user_page_list)
+                                       if (user_page_list) {
                                                user_page_list[entry].phys_addr = 0;
+                                       }
 
-                                       goto delay_unlock_queues;
+                                       goto try_next_page;
                                }
-                               /*
-                                * need to allocate a page
-                                */
-                               dst_page = vm_page_grab();
+                               if (object->scan_collisions) {
+                                       /*
+                                        * the pageout_scan thread is trying to steal
+                                        * pages from this object, but has run into our
+                                        * lock... grab 2 pages from the head of the object...
+                                        * the first is freed on behalf of pageout_scan, the
+                                        * 2nd is for our own use... we use vm_object_page_grab
+                                        * in both cases to avoid taking pages from the free
+                                        * list since we are under memory pressure and our
+                                        * lock on this object is getting in the way of
+                                        * relieving it
+                                        */
+                                       dst_page = vm_object_page_grab(object);
+
+                                       if (dst_page != VM_PAGE_NULL) {
+                                               vm_page_release(dst_page,
+                                                   FALSE);
+                                       }
 
+                                       dst_page = vm_object_page_grab(object);
+                               }
+                               if (dst_page == VM_PAGE_NULL) {
+                                       /*
+                                        * need to allocate a page
+                                        */
+                                       dst_page = vm_page_grab_options(grab_options);
+                                       if (dst_page != VM_PAGE_NULL) {
+                                               page_grab_count++;
+                                       }
+                               }
                                if (dst_page == VM_PAGE_NULL) {
-                                       if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
-                                              /*
-                                               * we don't want to stall waiting for pages to come onto the free list
-                                               * while we're already holding absent pages in this UPL
-                                               * the caller will deal with the empty slots
-                                               */
-                                               if (user_page_list)
-                                                       user_page_list[entry].phys_addr = 0;
+                                       if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
+                                               /*
+                                                * we don't want to stall waiting for pages to come onto the free list
+                                                * while we're already holding absent pages in this UPL
+                                                * the caller will deal with the empty slots
+                                                */
+                                               if (user_page_list) {
+                                                       user_page_list[entry].phys_addr = 0;
+                                               }
 
                                                goto try_next_page;
                                        }
-                                       /*
+                                       /*
                                         * no pages available... wait
                                         * then try again for the same
                                         * offset...
                                         */
-                                       delayed_unlock = 0;
-                                       vm_page_unlock_queues();
-
                                        vm_object_unlock(object);
+
+                                       OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
+
+                                       VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
+
                                        VM_PAGE_WAIT();
+                                       OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
 
-                                       /*
-                                        * pageout_scan takes the vm_page_lock_queues first
-                                        * then tries for the object lock... to avoid what
-                                        * is effectively a lock inversion, we'll go to the
-                                        * trouble of taking them in that same order... otherwise
-                                        * if this object contains the majority of the pages resident
-                                        * in the UBC (or a small set of large objects actively being
-                                        * worked on contain the majority of the pages), we could
-                                        * cause the pageout_scan thread to 'starve' in its attempt
-                                        * to find pages to move to the free queue, since it has to
-                                        * successfully acquire the object lock of any candidate page
-                                        * before it can steal/clean it.
-                                        */
-                                       for (j = 0; ; j++) {
-                                               vm_page_lock_queues();
+                                       VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
 
-                                               if (vm_object_lock_try(object))
-                                                       break;
-                                               vm_page_unlock_queues();
-                                               mutex_pause(j);
-                                       }
-                                       delayed_unlock = 1;
+                                       vm_object_lock(object);
 
                                        continue;
                                }
-                               vm_page_insert_internal(dst_page, object, dst_offset, TRUE);
+                               vm_page_insert(dst_page, object, dst_offset);
 
-                               dst_page->absent = TRUE;
-                               dst_page->busy = FALSE;
+                               dst_page->vmp_absent = TRUE;
+                               dst_page->vmp_busy = FALSE;
 
                                if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
-                                       /*
+                                       /*
                                         * if UPL_RET_ONLY_ABSENT was specified,
                                         * then we're definitely setting up a
-                                        * upl for a clustered read/pagein 
+                                        * upl for a clustered read/pagein
                                         * operation... mark the pages as clustered
                                         * so upl_commit_range can put them on the
                                         * speculative list
                                         */
-                                       dst_page->clustered = TRUE;
-                               }
-                       }
-                       /*
-                        * ENCRYPTED SWAP:
-                        */
-                       if (cntrl_flags & UPL_ENCRYPT) {
-                               /*
-                                * The page is going to be encrypted when we
-                                * get it from the pager, so mark it so.
-                                */
-                               dst_page->encrypted = TRUE;
-                       } else {
-                               /*
-                                * Otherwise, the page will not contain
-                                * encrypted data.
-                                */
-                               dst_page->encrypted = FALSE;
-                       }
-                       dst_page->overwriting = TRUE;
+                                       dst_page->vmp_clustered = TRUE;
 
-                       if (dst_page->fictitious) {
-                               panic("need corner case for fictitious page");
+                                       if (!(cntrl_flags & UPL_FILE_IO)) {
+                                               VM_STAT_INCR(pageins);
+                                       }
+                               }
                        }
-                       if (dst_page->busy) {
-                               /*
-                                * someone else is playing with the
-                                * page.  We will have to wait.
-                                */
-                               delayed_unlock = 0;
-                               vm_page_unlock_queues();
+                       phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
 
-                               PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+                       dst_page->vmp_overwriting = TRUE;
 
-                               continue;
-                       }
-                       if (dst_page->pmapped) {
-                               if ( !(cntrl_flags & UPL_FILE_IO))
-                                       /*
+                       if (dst_page->vmp_pmapped) {
+                               if (!(cntrl_flags & UPL_FILE_IO)) {
+                                       /*
                                         * eliminate all mappings from the
                                          * original object and its progeny
                                         */
-                                       refmod_state = pmap_disconnect(dst_page->phys_page);
-                               else
-                                       refmod_state = pmap_get_refmod(dst_page->phys_page);
-                       } else
-                               refmod_state = 0;
+                                       refmod_state = pmap_disconnect(phys_page);
+                               } else {
+                                       refmod_state = pmap_get_refmod(phys_page);
+                               }
+                       } else {
+                               refmod_state = 0;
+                       }
 
                        hw_dirty = refmod_state & VM_MEM_MODIFIED;
-                       dirty = hw_dirty ? TRUE : dst_page->dirty;
+                       dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
 
                        if (cntrl_flags & UPL_SET_LITE) {
-                               int     pg_num;
+                               unsigned int    pg_num;
 
-                               pg_num = (dst_offset-offset)/PAGE_SIZE;
-                               lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+                               pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
+                               assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
+                               lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
 
-                               if (hw_dirty)
-                                       pmap_clear_modify(dst_page->phys_page);
+                               if (hw_dirty) {
+                                       pmap_clear_modify(phys_page);
+                               }
 
                                /*
-                                * Mark original page as cleaning 
+                                * Mark original page as cleaning
                                 * in place.
                                 */
-                               dst_page->cleaning = TRUE;
-                               dst_page->precious = FALSE;
+                               dst_page->vmp_cleaning = TRUE;
+                               dst_page->vmp_precious = FALSE;
                        } else {
                                /*
                                 * use pageclean setup, it is more
                                 * convenient even for the pageout
                                 * cases here
                                 */
-                               vm_object_lock(upl->map_object);
+                               vm_object_lock(upl->map_object);
                                vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
-                               vm_object_unlock(upl->map_object);
+                               vm_object_unlock(upl->map_object);
 
-                               alias_page->absent = FALSE;
+                               alias_page->vmp_absent = FALSE;
                                alias_page = NULL;
                        }
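(For reference, the UPL_SET_LITE bookkeeping above packs one bit per page into an array of 32-bit words, indexed with pg_num >> 5 and pg_num & 31. A minimal, compilable sketch of that bitmap; PAGE_SIZE and the helper names here are illustrative, not kernel APIs.)

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* mark page pg_num in a bitmap of 32-bit words, as the UPL lite list does */
static void lite_set(uint32_t *lite_list, unsigned int pg_num) {
    lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
}

static int lite_test(const uint32_t *lite_list, unsigned int pg_num) {
    return (lite_list[pg_num >> 5] >> (pg_num & 31)) & 1;
}

int main(void) {
    uint32_t lite_list[4] = {0};               /* covers 128 pages */
    uint64_t offset = 0x10000, dst_offset = 0x2A000;

    unsigned int pg_num = (unsigned int)((dst_offset - offset) / PAGE_SIZE);
    lite_set(lite_list, pg_num);

    printf("page %u set: %d (word 0x%08x)\n",
        pg_num, lite_test(lite_list, pg_num), lite_list[pg_num >> 5]);
    return 0;
}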
 
-                       if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
+                       if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
+                               upl->flags &= ~UPL_CLEAR_DIRTY;
+                               upl->flags |= UPL_SET_DIRTY;
+                               dirty = TRUE;
+                               /*
+                                * Page belonging to a code-signed object is about to
+                                * be written. Mark it tainted and disconnect it from
+                                * all pmaps so processes have to fault it back in and
+                                * deal with the tainted bit.
+                                */
+                               if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
+                                       dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
+                                       vm_page_upl_tainted++;
+                                       if (dst_page->vmp_pmapped) {
+                                               refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+                                               if (refmod_state & VM_MEM_REFERENCED) {
+                                                       dst_page->vmp_reference = TRUE;
+                                               }
+                                       }
+                               }
+                       } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
                                /*
                                 * clean in place for read implies
                                 * that a write will be done on all
@@ -3285,175 +6117,149 @@ check_busy:
                                 */
                                upl->flags |= UPL_CLEAR_DIRTY;
                        }
-                       dst_page->dirty = dirty;
+                       dst_page->vmp_dirty = dirty;
 
-                       if (!dirty)
-                               dst_page->precious = TRUE;
+                       if (!dirty) {
+                               dst_page->vmp_precious = TRUE;
+                       }
 
-                       if (dst_page->wire_count == 0) {
-                               /*
+                       if (!VM_PAGE_WIRED(dst_page)) {
+                               /*
                                 * deny access to the target page while
                                 * it is being worked on
                                 */
-                               dst_page->busy = TRUE;
-                       } else
-                               vm_page_wire(dst_page);
-
-                       if (dst_page->clustered) {
-                               /*
-                                * expect the page not to be used
-                                * since it's coming in as part
-                                * of a speculative cluster... 
-                                * pages that are 'consumed' will
-                                * get a hardware reference
-                                */
-                               dst_page->reference = FALSE;
+                               dst_page->vmp_busy = TRUE;
                        } else {
-                               /*
+                               dwp->dw_mask |= DW_vm_page_wire;
+                       }
+
+                       /*
+                        * We might be about to satisfy a fault which has been
+                        * requested. So no need for the "restart" bit.
+                        */
+                       dst_page->vmp_restart = FALSE;
+                       if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
+                               /*
                                 * expect the page to be used
                                 */
-                               dst_page->reference = TRUE;
+                               dwp->dw_mask |= DW_set_reference;
+                       }
+                       if (cntrl_flags & UPL_PRECIOUS) {
+                               if (object->internal) {
+                                       SET_PAGE_DIRTY(dst_page, FALSE);
+                                       dst_page->vmp_precious = FALSE;
+                               } else {
+                                       dst_page->vmp_precious = TRUE;
+                               }
+                       } else {
+                               dst_page->vmp_precious = FALSE;
                        }
-                       dst_page->precious = (cntrl_flags & UPL_PRECIOUS) ? TRUE : FALSE;
                }
-               if (dst_page->phys_page > upl->highest_page)
-                       upl->highest_page = dst_page->phys_page;
+               if (dst_page->vmp_busy) {
+                       upl->flags |= UPL_HAS_BUSY;
+               }
+
+               if (phys_page > upl->highest_page) {
+                       upl->highest_page = phys_page;
+               }
+               assert(!pmap_is_noencrypt(phys_page));
                if (user_page_list) {
-                       user_page_list[entry].phys_addr = dst_page->phys_page;
-                       user_page_list[entry].pageout   = dst_page->pageout;
-                       user_page_list[entry].absent    = dst_page->absent;
-                       user_page_list[entry].dirty     = dst_page->dirty;
-                       user_page_list[entry].precious  = dst_page->precious;
-                       user_page_list[entry].device    = FALSE;
-                       if (dst_page->clustered == TRUE)
-                               user_page_list[entry].speculative = dst_page->speculative;
-                       else
-                               user_page_list[entry].speculative = FALSE;
-                       user_page_list[entry].cs_validated = dst_page->cs_validated;
-                       user_page_list[entry].cs_tainted = dst_page->cs_tainted;
-               }
-               /*
+                       user_page_list[entry].phys_addr = phys_page;
+                       user_page_list[entry].free_when_done    = dst_page->vmp_free_when_done;
+                       user_page_list[entry].absent    = dst_page->vmp_absent;
+                       user_page_list[entry].dirty     = dst_page->vmp_dirty;
+                       user_page_list[entry].precious  = dst_page->vmp_precious;
+                       user_page_list[entry].device    = FALSE;
+                       user_page_list[entry].needed    = FALSE;
+                       if (dst_page->vmp_clustered == TRUE) {
+                               user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
+                       } else {
+                               user_page_list[entry].speculative = FALSE;
+                       }
+                       user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
+                       user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
+                       user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
+                       user_page_list[entry].mark      = FALSE;
+               }
+               /*
                 * if UPL_RET_ONLY_ABSENT is set, then
                 * we are working with a fresh page and we've
                 * just set the clustered flag on it to
                 * indicate that it was dragged in as part of a
                 * speculative cluster... so leave it alone
                 */
-               if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
-                       /*
+               if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+                       /*
                         * someone is explicitly grabbing this page...
                         * update clustered and speculative state
-                        * 
+                        *
                         */
-                       VM_PAGE_CONSUME_CLUSTERED(dst_page);
+                       if (dst_page->vmp_clustered) {
+                               VM_PAGE_CONSUME_CLUSTERED(dst_page);
+                       }
                }
-delay_unlock_queues:
-               if (delayed_unlock++ > UPL_DELAYED_UNLOCK_LIMIT) {
-                       /*
-                        * pageout_scan takes the vm_page_lock_queues first
-                        * then tries for the object lock... to avoid what
-                        * is effectively a lock inversion, we'll go to the
-                        * trouble of taking them in that same order... otherwise
-                        * if this object contains the majority of the pages resident
-                        * in the UBC (or a small set of large objects actively being
-                        * worked on contain the majority of the pages), we could
-                        * cause the pageout_scan thread to 'starve' in its attempt
-                        * to find pages to move to the free queue, since it has to
-                        * successfully acquire the object lock of any candidate page
-                        * before it can steal/clean it.
-                        */
-                       vm_object_unlock(object);
-                       mutex_yield(&vm_page_queue_lock);
+try_next_page:
+               if (dwp->dw_mask) {
+                       if (dwp->dw_mask & DW_vm_page_activate) {
+                               VM_STAT_INCR(reactivations);
+                       }
 
-                       for (j = 0; ; j++) {
-                               if (vm_object_lock_try(object))
-                                       break;
-                               vm_page_unlock_queues();
-                               mutex_pause(j);
-                               vm_page_lock_queues();
+                       VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
+
+                       if (dw_count >= dw_limit) {
+                               vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
+
+                               dwp = dwp_start;
+                               dw_count = 0;
                        }
-                       delayed_unlock = 1;
                }
-try_next_page:
                entry++;
                dst_offset += PAGE_SIZE_64;
                xfer_size -= PAGE_SIZE;
        }
+       if (dw_count) {
+               vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
+               dwp = dwp_start;
+               dw_count = 0;
+       }
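(The delayed-work handling above follows a plain flush-when-full pattern: per-page work is deferred into a fixed array, flushed via vm_page_do_delayed_work() when dw_count reaches dw_limit, and the remainder is flushed once the loop ends. A generic sketch of the same pattern; process_batch() and BATCH_LIMIT are stand-ins of my own, not kernel names.)

#include <stdio.h>

#define BATCH_LIMIT 8                          /* stand-in for dw_limit */

static void process_batch(const int *batch, int count) {
    printf("flushing %d deferred items\n", count);
}

int main(void) {
    int batch[BATCH_LIMIT];
    int count = 0;

    for (int item = 0; item < 20; item++) {
        batch[count++] = item;                 /* defer the work */
        if (count >= BATCH_LIMIT) {            /* full: flush and reset */
            process_batch(batch, count);
            count = 0;
        }
    }
    if (count)                                 /* flush the remainder after the loop */
        process_batch(batch, count);
    return 0;
}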
+
        if (alias_page != NULL) {
-               if (delayed_unlock == 0) {
-                       vm_page_lock_queues();
-                       delayed_unlock = 1;
-               }
-               vm_page_free(alias_page);
+               VM_PAGE_FREE(alias_page);
+       }
+       if (pmap_flushes_delayed == TRUE) {
+               pmap_flush(&pmap_flush_context_storage);
        }
-       if (delayed_unlock)
-               vm_page_unlock_queues();
 
        if (page_list_count != NULL) {
-               if (upl->flags & UPL_INTERNAL)
+               if (upl->flags & UPL_INTERNAL) {
                        *page_list_count = 0;
-               else if (*page_list_count > entry)
+               } else if (*page_list_count > entry) {
                        *page_list_count = entry;
+               }
        }
+#if UPL_DEBUG
+       upl->upl_state = 1;
+#endif
        vm_object_unlock(object);
 
-       return KERN_SUCCESS;
-}
-
-/* JMM - Backward compatability for now */
-kern_return_t
-vm_fault_list_request(                 /* forward */
-       memory_object_control_t         control,
-       vm_object_offset_t      offset,
-       upl_size_t              size,
-       upl_t                   *upl_ptr,
-       upl_page_info_t         **user_page_list_ptr,
-       unsigned int            page_list_count,
-       int                     cntrl_flags);
-kern_return_t
-vm_fault_list_request(
-       memory_object_control_t         control,
-       vm_object_offset_t      offset,
-       upl_size_t              size,
-       upl_t                   *upl_ptr,
-       upl_page_info_t         **user_page_list_ptr,
-       unsigned int            page_list_count,
-       int                     cntrl_flags)
-{
-       unsigned int            local_list_count;
-       upl_page_info_t         *user_page_list;
-       kern_return_t           kr;
-
-       if (user_page_list_ptr != NULL) {
-               local_list_count = page_list_count;
-               user_page_list = *user_page_list_ptr;
-       } else {
-               local_list_count = 0;
-               user_page_list = NULL;
-       }
-       kr =  memory_object_upl_request(control,
-                               offset,
-                               size,
-                               upl_ptr,
-                               user_page_list,
-                               &local_list_count,
-                               cntrl_flags);
-
-       if(kr != KERN_SUCCESS)
-               return kr;
+       VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#if DEVELOPMENT || DEBUG
+       if (task != NULL) {
+               ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
+       }
+#endif /* DEVELOPMENT || DEBUG */
 
-       if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
-               *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
+       if (dwp_start && dwp_finish_ctx) {
+               vm_page_delayed_work_finish_ctx(dwp_start);
+               dwp_start = dwp = NULL;
        }
 
        return KERN_SUCCESS;
 }
 
-               
-
-/*  
+/*
  *     Routine:        vm_object_super_upl_request
- *     Purpose:        
+ *     Purpose:
  *             Cause the population of a portion of a vm_object
  *             in much the same way as memory_object_upl_request.
  *             Depending on the nature of the request, the pages
@@ -3465,68 +6271,91 @@ vm_fault_list_request(
 __private_extern__ kern_return_t
 vm_object_super_upl_request(
        vm_object_t object,
-       vm_object_offset_t      offset,
-       upl_size_t              size,
-       upl_size_t              super_cluster,
-       upl_t                   *upl,
-       upl_page_info_t         *user_page_list,
-       unsigned int            *page_list_count,
-       int                     cntrl_flags)
+       vm_object_offset_t      offset,
+       upl_size_t              size,
+       upl_size_t              super_cluster,
+       upl_t                   *upl,
+       upl_page_info_t         *user_page_list,
+       unsigned int            *page_list_count,
+       upl_control_flags_t     cntrl_flags,
+       vm_tag_t                tag)
 {
-       if (object->paging_offset > offset)
+       if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
                return KERN_FAILURE;
+       }
 
        assert(object->paging_in_progress);
        offset = offset - object->paging_offset;
 
        if (super_cluster > size) {
-
-               vm_object_offset_t      base_offset;
-               upl_size_t              super_size;
+               vm_object_offset_t      base_offset;
+               upl_size_t              super_size;
+               vm_object_size_t        super_size_64;
 
                base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
-               super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
-               super_size = ((base_offset + super_size) > object->size) ? (object->size - base_offset) : super_size;
+               super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
+               super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
+               super_size = (upl_size_t) super_size_64;
+               assert(super_size == super_size_64);
 
                if (offset > (base_offset + super_size)) {
-                       panic("vm_object_super_upl_request: Missed target pageout"
-                             " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
-                             offset, base_offset, super_size, super_cluster,
-                             size, object->paging_offset);
+                       panic("vm_object_super_upl_request: Missed target pageout"
+                           " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
+                           offset, base_offset, super_size, super_cluster,
+                           size, object->paging_offset);
                }
                /*
                 * apparently there is a case where the vm requests a
                 * page to be written out whose offset is beyond the
                 * object size
                 */
-               if ((offset + size) > (base_offset + super_size))
-                       super_size = (offset + size) - base_offset;
+               if ((offset + size) > (base_offset + super_size)) {
+                       super_size_64 = (offset + size) - base_offset;
+                       super_size = (upl_size_t) super_size_64;
+                       assert(super_size == super_size_64);
+               }
 
                offset = base_offset;
                size = super_size;
        }
-       return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
+       return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
 }
 
-                                
+int cs_executable_create_upl = 0;
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
+
 kern_return_t
 vm_map_create_upl(
-       vm_map_t                map,
-       vm_map_address_t        offset,
-       upl_size_t              *upl_size,
-       upl_t                   *upl,
-       upl_page_info_array_t   page_list,
-       unsigned int            *count,
-       int                     *flags)
+       vm_map_t                map,
+       vm_map_address_t        offset,
+       upl_size_t              *upl_size,
+       upl_t                   *upl,
+       upl_page_info_array_t   page_list,
+       unsigned int            *count,
+       upl_control_flags_t     *flags,
+       vm_tag_t                tag)
 {
-       vm_map_entry_t  entry;
-       int             caller_flags;
-       int             force_data_sync;
-       int             sync_cow_data;
-       vm_object_t     local_object;
-       vm_map_offset_t local_offset;
-       vm_map_offset_t local_start;
-       kern_return_t   ret;
+       vm_map_entry_t          entry;
+       upl_control_flags_t     caller_flags;
+       int                     force_data_sync;
+       int                     sync_cow_data;
+       vm_object_t             local_object;
+       vm_map_offset_t         local_offset;
+       vm_map_offset_t         local_start;
+       kern_return_t           ret;
+       vm_map_address_t        original_offset;
+       vm_map_size_t           original_size, adjusted_size;
+       vm_map_offset_t         local_entry_start;
+       vm_object_offset_t      local_entry_offset;
+       vm_object_offset_t      offset_in_mapped_page;
+       boolean_t               release_map = FALSE;
+
+start_with_map:
+
+       original_offset = offset;
+       original_size = *upl_size;
+       adjusted_size = original_size;
 
        caller_flags = *flags;
 
@@ -3535,225 +6364,624 @@ vm_map_create_upl(
                 * For forward compatibility's sake,
                 * reject any unknown flag.
                 */
-               return KERN_INVALID_VALUE;
+               ret = KERN_INVALID_VALUE;
+               goto done;
        }
        force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
        sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
 
-       if (upl == NULL)
-               return KERN_INVALID_ARGUMENT;
+       if (upl == NULL) {
+               ret = KERN_INVALID_ARGUMENT;
+               goto done;
+       }
 
 REDISCOVER_ENTRY:
-       vm_map_lock(map);
+       vm_map_lock_read(map);
+
+       if (!vm_map_lookup_entry(map, offset, &entry)) {
+               vm_map_unlock_read(map);
+               ret = KERN_FAILURE;
+               goto done;
+       }
+
+       local_entry_start = entry->vme_start;
+       local_entry_offset = VME_OFFSET(entry);
+
+       if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
+               DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%x flags 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, *upl_size, *flags);
+       }
+
+       if (entry->vme_end - original_offset < adjusted_size) {
+               adjusted_size = entry->vme_end - original_offset;
+               assert(adjusted_size > 0);
+               *upl_size = (upl_size_t) adjusted_size;
+               assert(*upl_size == adjusted_size);
+       }
+
+       if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
+               *flags = 0;
+
+               if (!entry->is_sub_map &&
+                   VME_OBJECT(entry) != VM_OBJECT_NULL) {
+                       if (VME_OBJECT(entry)->private) {
+                               *flags = UPL_DEV_MEMORY;
+                       }
+
+                       if (VME_OBJECT(entry)->phys_contiguous) {
+                               *flags |= UPL_PHYS_CONTIG;
+                       }
+               }
+               vm_map_unlock_read(map);
+               ret = KERN_SUCCESS;
+               goto done;
+       }
+
+       offset_in_mapped_page = 0;
+       if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
+               offset = vm_map_trunc_page(original_offset, VM_MAP_PAGE_MASK(map));
+               *upl_size = (upl_size_t)
+                   (vm_map_round_page(original_offset + adjusted_size,
+                   VM_MAP_PAGE_MASK(map))
+                   - offset);
 
-       if (vm_map_lookup_entry(map, offset, &entry)) {
+               offset_in_mapped_page = original_offset - offset;
+               assert(offset_in_mapped_page < VM_MAP_PAGE_SIZE(map));
 
-               if ((entry->vme_end - offset) < *upl_size)
-                       *upl_size = entry->vme_end - offset;
+               DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%llx flags 0x%llx -> offset 0x%llx adjusted_size 0x%llx *upl_size 0x%x offset_in_mapped_page 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)original_offset, (uint64_t)original_size, *flags, (uint64_t)offset, (uint64_t)adjusted_size, *upl_size, offset_in_mapped_page);
+       }
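(When the map's page size is smaller than the kernel's, the block above widens the caller's range to the map's page boundaries and records in offset_in_mapped_page how far into the first map-sized page the original offset sat. The same arithmetic as a standalone sketch; a 4 KB map page and the made-up offsets are assumptions for illustration.)

#include <stdint.h>
#include <stdio.h>

#define trunc_page_mask(x, mask)  ((x) & ~(uint64_t)(mask))
#define round_page_mask(x, mask)  (((x) + (mask)) & ~(uint64_t)(mask))

int main(void) {
    uint64_t map_page_mask   = 0xFFF;      /* e.g. a 4 KB map page */
    uint64_t original_offset = 0x1A600;    /* caller's (unaligned) start */
    uint64_t adjusted_size   = 0x2200;     /* caller's length */

    uint64_t offset   = trunc_page_mask(original_offset, map_page_mask);
    uint64_t upl_size = round_page_mask(original_offset + adjusted_size,
                            map_page_mask) - offset;
    uint64_t offset_in_mapped_page = original_offset - offset;

    printf("offset 0x%llx size 0x%llx offset_in_mapped_page 0x%llx\n",
        (unsigned long long)offset, (unsigned long long)upl_size,
        (unsigned long long)offset_in_mapped_page);
    return 0;
}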
+
+       if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
+           !VME_OBJECT(entry)->phys_contiguous) {
+               if (*upl_size > MAX_UPL_SIZE_BYTES) {
+                       *upl_size = MAX_UPL_SIZE_BYTES;
+               }
+       }
+
+       /*
+        *      Create an object if necessary.
+        */
+       if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
+               if (vm_map_lock_read_to_write(map)) {
+                       goto REDISCOVER_ENTRY;
+               }
+
+               VME_OBJECT_SET(entry,
+                   vm_object_allocate((vm_size_t)
+                   vm_object_round_page((entry->vme_end - entry->vme_start))));
+               VME_OFFSET_SET(entry, 0);
+               assert(entry->use_pmap);
+
+               vm_map_lock_write_to_read(map);
+       }
 
-               if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
-                       *flags = 0;
+       if (!(caller_flags & UPL_COPYOUT_FROM) &&
+           !entry->is_sub_map &&
+           !(entry->protection & VM_PROT_WRITE)) {
+               vm_map_unlock_read(map);
+               ret = KERN_PROTECTION_FAILURE;
+               goto done;
+       }
+
+#if CONFIG_EMBEDDED
+       if (map->pmap != kernel_pmap &&
+           (caller_flags & UPL_COPYOUT_FROM) &&
+           (entry->protection & VM_PROT_EXECUTE) &&
+           !(entry->protection & VM_PROT_WRITE)) {
+               vm_offset_t     kaddr;
+               vm_size_t       ksize;
 
-                       if (entry->object.vm_object != VM_OBJECT_NULL) {
-                               if (entry->object.vm_object->private)
-                                       *flags = UPL_DEV_MEMORY;
+               /*
+                * We're about to create a read-only UPL backed by
+                * memory from an executable mapping.
+                * Wiring the pages would result in the pages being copied
+                * (due to the "MAP_PRIVATE" mapping) and no longer
+                * code-signed, so no longer eligible for execution.
+                * Instead, let's copy the data into a kernel buffer and
+                * create the UPL from this kernel buffer.
+                * The kernel buffer is then freed, leaving the UPL holding
+                * the last reference on the VM object, so the memory will
+                * be released when the UPL is committed.
+                */
 
-                               if (entry->object.vm_object->phys_contiguous)
-                                       *flags |= UPL_PHYS_CONTIG;
+               vm_map_unlock_read(map);
+               entry = VM_MAP_ENTRY_NULL;
+               /* allocate kernel buffer */
+               ksize = round_page(*upl_size);
+               kaddr = 0;
+               ret = kmem_alloc_pageable(kernel_map,
+                   &kaddr,
+                   ksize,
+                   tag);
+               if (ret == KERN_SUCCESS) {
+                       /* copyin the user data */
+                       ret = copyinmap(map, offset, (void *)kaddr, *upl_size);
+               }
+               if (ret == KERN_SUCCESS) {
+                       if (ksize > *upl_size) {
+                               /* zero out the extra space in kernel buffer */
+                               memset((void *)(kaddr + *upl_size),
+                                   0,
+                                   ksize - *upl_size);
                        }
-                       vm_map_unlock(map);
-
-                       return KERN_SUCCESS;
+                       /* create the UPL from the kernel buffer */
+                       vm_object_offset_t      offset_in_object;
+                       vm_object_offset_t      offset_in_object_page;
+
+                       offset_in_object = offset - local_entry_start + local_entry_offset;
+                       offset_in_object_page = offset_in_object - vm_object_trunc_page(offset_in_object);
+                       assert(offset_in_object_page < PAGE_SIZE);
+                       assert(offset_in_object_page + offset_in_mapped_page < PAGE_SIZE);
+                       *upl_size -= offset_in_object_page + offset_in_mapped_page;
+                       ret = vm_map_create_upl(kernel_map,
+                           (vm_map_address_t)(kaddr + offset_in_object_page + offset_in_mapped_page),
+                           upl_size, upl, page_list, count, flags, tag);
                }
-               if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
-                       if ((*upl_size/page_size) > MAX_UPL_SIZE)
-                                       *upl_size = MAX_UPL_SIZE * page_size;
+               if (kaddr != 0) {
+                       /* free the kernel buffer */
+                       kmem_free(kernel_map, kaddr, ksize);
+                       kaddr = 0;
+                       ksize = 0;
                }
+#if DEVELOPMENT || DEBUG
+               DTRACE_VM4(create_upl_from_executable,
+                   vm_map_t, map,
+                   vm_map_address_t, offset,
+                   upl_size_t, *upl_size,
+                   kern_return_t, ret);
+#endif /* DEVELOPMENT || DEBUG */
+               goto done;
+       }
+#endif /* CONFIG_EMBEDDED */
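(The CONFIG_EMBEDDED block above avoids wiring a read-only executable mapping by copying the caller's bytes into a page-rounded kernel buffer, zeroing the slack at the end of the last page, and creating the UPL from that buffer instead. A userspace analogue of just the copy-and-zero step; round_page(), malloc and the sizes are illustrative stand-ins, not the kernel's kmem calls.)

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1))

int main(void) {
    const char user_data[] = "payload from an executable, read-only mapping";
    size_t upl_size = sizeof(user_data);

    /* page-rounded scratch buffer standing in for the kernel allocation */
    size_t ksize = round_page(upl_size);
    unsigned char *kaddr = malloc(ksize);
    if (!kaddr) return 1;

    memcpy(kaddr, user_data, upl_size);        /* "copyin" the user bytes */
    if (ksize > upl_size)                      /* zero the slack in the last page */
        memset(kaddr + upl_size, 0, ksize - upl_size);

    /* ... here the kernel would build the UPL from kaddr ... */
    printf("copied %zu bytes into a %zu-byte buffer\n", upl_size, ksize);

    free(kaddr);                               /* scratch buffer released afterwards */
    return 0;
}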
+
+       local_object = VME_OBJECT(entry);
+       assert(local_object != VM_OBJECT_NULL);
+
+       if (!entry->is_sub_map &&
+           !entry->needs_copy &&
+           *upl_size != 0 &&
+           local_object->vo_size > *upl_size && /* partial UPL */
+           entry->wired_count == 0 && /* No COW for entries that are wired */
+           (map->pmap != kernel_pmap) && /* alias checks */
+           (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
+           ||
+           ( /* case 2 */
+                   local_object->internal &&
+                   (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
+                   local_object->ref_count > 1))) {
+               vm_prot_t       prot;
+
                /*
-                *      Create an object if necessary.
+                * Case 1:
+                * Set up the targeted range for copy-on-write to avoid
+                * applying true_share/copy_delay to the entire object.
+                *
+                * Case 2:
+                * This map entry covers only part of an internal
+                * object.  There could be other map entries covering
+                * other areas of this object and some of these map
+                * entries could be marked as "needs_copy", which
+                * assumes that the object is COPY_SYMMETRIC.
+                * To avoid marking this object as COPY_DELAY and
+                * "true_share", let's shadow it and mark the new
+                * (smaller) object as "true_share" and COPY_DELAY.
                 */
-               if (entry->object.vm_object == VM_OBJECT_NULL) {
-                       entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
-                       entry->offset = 0;
-               }
-               if (!(caller_flags & UPL_COPYOUT_FROM)) {
-                       if (!(entry->protection & VM_PROT_WRITE)) {
-                               vm_map_unlock(map);
-                               return KERN_PROTECTION_FAILURE;
-                       }
-                       if (entry->needs_copy)  {
-                               vm_map_t                local_map;
-                               vm_object_t             object;
-                               vm_object_offset_t      new_offset;
-                               vm_prot_t               prot;
-                               boolean_t               wired;
-                               vm_map_version_t        version;
-                               vm_map_t                real_map;
-
-                               local_map = map;
-                               vm_map_lock_write_to_read(map);
-
-                               if (vm_map_lookup_locked(&local_map,
-                                                        offset, VM_PROT_WRITE,
-                                                        OBJECT_LOCK_EXCLUSIVE,
-                                                        &version, &object,
-                                                        &new_offset, &prot, &wired,
-                                                        NULL,
-                                                        &real_map)) {
-                                       vm_map_unlock(local_map);
-                                       return KERN_FAILURE;
-                               }
-                               if (real_map != map)
-                                       vm_map_unlock(real_map);
-                               vm_object_unlock(object);
-                               vm_map_unlock(local_map);
 
-                               goto REDISCOVER_ENTRY;
-                       }
+               if (vm_map_lock_read_to_write(map)) {
+                       goto REDISCOVER_ENTRY;
+               }
+               vm_map_lock_assert_exclusive(map);
+               assert(VME_OBJECT(entry) == local_object);
+
+               vm_map_clip_start(map,
+                   entry,
+                   vm_map_trunc_page(offset,
+                   VM_MAP_PAGE_MASK(map)));
+               vm_map_clip_end(map,
+                   entry,
+                   vm_map_round_page(offset + *upl_size,
+                   VM_MAP_PAGE_MASK(map)));
+               if ((entry->vme_end - offset) < *upl_size) {
+                       *upl_size = (upl_size_t) (entry->vme_end - offset);
+                       assert(*upl_size == entry->vme_end - offset);
                }
-               if (entry->is_sub_map) {
-                       vm_map_t        submap;
 
-                       submap = entry->object.sub_map;
-                       local_start = entry->vme_start;
-                       local_offset = entry->offset;
+               prot = entry->protection & ~VM_PROT_WRITE;
+               if (override_nx(map, VME_ALIAS(entry)) && prot) {
+                       prot |= VM_PROT_EXECUTE;
+               }
+               vm_object_pmap_protect(local_object,
+                   VME_OFFSET(entry),
+                   entry->vme_end - entry->vme_start,
+                   ((entry->is_shared ||
+                   map->mapped_in_other_pmaps)
+                   ? PMAP_NULL
+                   : map->pmap),
+                   VM_MAP_PAGE_SIZE(map),
+                   entry->vme_start,
+                   prot);
+
+               assert(entry->wired_count == 0);
 
-                       vm_map_reference(submap);
-                       vm_map_unlock(map);
+               /*
+                * Lock the VM object and re-check its status: if it's mapped
+                * in another address space, we could still be racing with
+                * another thread holding that other VM map exclusively.
+                */
+               vm_object_lock(local_object);
+               if (local_object->true_share) {
+                       /* object is already in proper state: no COW needed */
+                       assert(local_object->copy_strategy !=
+                           MEMORY_OBJECT_COPY_SYMMETRIC);
+               } else {
+                       /* not true_share: ask for copy-on-write below */
+                       assert(local_object->copy_strategy ==
+                           MEMORY_OBJECT_COPY_SYMMETRIC);
+                       entry->needs_copy = TRUE;
+               }
+               vm_object_unlock(local_object);
 
-                       ret = vm_map_create_upl(submap, 
-                                               local_offset + (offset - local_start), 
-                                               upl_size, upl, page_list, count, flags);
-                       vm_map_deallocate(submap);
+               vm_map_lock_write_to_read(map);
+       }
 
-                       return ret;
+       if (entry->needs_copy) {
+               /*
+                * Honor copy-on-write for COPY_SYMMETRIC
+                * strategy.
+                */
+               vm_map_t                local_map;
+               vm_object_t             object;
+               vm_object_offset_t      new_offset;
+               vm_prot_t               prot;
+               boolean_t               wired;
+               vm_map_version_t        version;
+               vm_map_t                real_map;
+               vm_prot_t               fault_type;
+
+               if (entry->vme_start < VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(map)) ||
+                   entry->vme_end > VM_MAP_ROUND_PAGE(offset + *upl_size, VM_MAP_PAGE_MASK(map))) {
+                       /*
+                        * Clip the requested range first to minimize the
+                        * amount of potential copying...
+                        */
+                       if (vm_map_lock_read_to_write(map)) {
+                               goto REDISCOVER_ENTRY;
+                       }
+                       vm_map_lock_assert_exclusive(map);
+                       assert(VME_OBJECT(entry) == local_object);
+                       vm_map_clip_start(map, entry,
+                           VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(map)));
+                       vm_map_clip_end(map, entry,
+                           VM_MAP_ROUND_PAGE(offset + *upl_size, VM_MAP_PAGE_MASK(map)));
+                       vm_map_lock_write_to_read(map);
                }
-               if (sync_cow_data) {
-                       if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
-                               local_object = entry->object.vm_object;
-                               local_start = entry->vme_start;
-                               local_offset = entry->offset;
 
-                               vm_object_reference(local_object);
-                               vm_map_unlock(map);
+               local_map = map;
 
-                               if (entry->object.vm_object->shadow && entry->object.vm_object->copy) {
-                                       vm_object_lock_request(
-                                                              local_object->shadow,
-                                                              (vm_object_offset_t)
-                                                              ((offset - local_start) +
-                                                               local_offset) +
-                                                              local_object->shadow_offset,
-                                                              *upl_size, FALSE, 
-                                                              MEMORY_OBJECT_DATA_SYNC,
-                                                              VM_PROT_NO_CHANGE);
-                               }
-                               sync_cow_data = FALSE;
-                               vm_object_deallocate(local_object);
-
-                               goto REDISCOVER_ENTRY;
+               if (caller_flags & UPL_COPYOUT_FROM) {
+                       fault_type = VM_PROT_READ | VM_PROT_COPY;
+                       vm_counters.create_upl_extra_cow++;
+                       vm_counters.create_upl_extra_cow_pages +=
+                           (entry->vme_end - entry->vme_start) / PAGE_SIZE;
+               } else {
+                       fault_type = VM_PROT_WRITE;
+               }
+               if (vm_map_lookup_locked(&local_map,
+                   offset, fault_type,
+                   OBJECT_LOCK_EXCLUSIVE,
+                   &version, &object,
+                   &new_offset, &prot, &wired,
+                   NULL,
+                   &real_map, NULL) != KERN_SUCCESS) {
+                       if (fault_type == VM_PROT_WRITE) {
+                               vm_counters.create_upl_lookup_failure_write++;
+                       } else {
+                               vm_counters.create_upl_lookup_failure_copy++;
                        }
+                       vm_map_unlock_read(local_map);
+                       ret = KERN_FAILURE;
+                       goto done;
                }
-               if (force_data_sync) {
-                       local_object = entry->object.vm_object;
-                       local_start = entry->vme_start;
-                       local_offset = entry->offset;
+               if (real_map != local_map) {
+                       vm_map_unlock(real_map);
+               }
+               vm_map_unlock_read(local_map);
+
+               vm_object_unlock(object);
 
-                       vm_object_reference(local_object);
-                       vm_map_unlock(map);
+               goto REDISCOVER_ENTRY;
+       }
 
-                       vm_object_lock_request(
-                                              local_object,
-                                              (vm_object_offset_t)
-                                              ((offset - local_start) + local_offset),
-                                              (vm_object_size_t)*upl_size, FALSE, 
-                                              MEMORY_OBJECT_DATA_SYNC,
-                                              VM_PROT_NO_CHANGE);
+       if (entry->is_sub_map) {
+               vm_map_t        submap;
 
-                       force_data_sync = FALSE;
-                       vm_object_deallocate(local_object);
+               submap = VME_SUBMAP(entry);
+               local_start = entry->vme_start;
+               local_offset = (vm_map_offset_t)VME_OFFSET(entry);
 
-                       goto REDISCOVER_ENTRY;
+               vm_map_reference(submap);
+               vm_map_unlock_read(map);
+
+               DEBUG4K_UPL("map %p offset 0x%llx (0x%llx) size 0x%x (adjusted 0x%llx original 0x%llx) offset_in_mapped_page 0x%llx submap %p\n", map, (uint64_t)offset, (uint64_t)original_offset, *upl_size, (uint64_t)adjusted_size, (uint64_t)original_size, offset_in_mapped_page, submap);
+               offset += offset_in_mapped_page;
+               *upl_size -= offset_in_mapped_page;
+
+               if (release_map) {
+                       vm_map_deallocate(map);
                }
-               if (entry->object.vm_object->private)
-                       *flags = UPL_DEV_MEMORY;
-               else
-                       *flags = 0;
+               map = submap;
+               release_map = TRUE;
+               offset = local_offset + (offset - local_start);
+               goto start_with_map;
+       }
+
+       if (sync_cow_data &&
+           (VME_OBJECT(entry)->shadow ||
+           VME_OBJECT(entry)->copy)) {
+               local_object = VME_OBJECT(entry);
+               local_start = entry->vme_start;
+               local_offset = (vm_map_offset_t)VME_OFFSET(entry);
 
-               if (entry->object.vm_object->phys_contiguous)
-                       *flags |= UPL_PHYS_CONTIG;
+               vm_object_reference(local_object);
+               vm_map_unlock_read(map);
+
+               if (local_object->shadow && local_object->copy) {
+                       vm_object_lock_request(local_object->shadow,
+                           ((vm_object_offset_t)
+                           ((offset - local_start) +
+                           local_offset) +
+                           local_object->vo_shadow_offset),
+                           *upl_size, FALSE,
+                           MEMORY_OBJECT_DATA_SYNC,
+                           VM_PROT_NO_CHANGE);
+               }
+               sync_cow_data = FALSE;
+               vm_object_deallocate(local_object);
 
-               local_object = entry->object.vm_object;
-               local_offset = entry->offset;
+               goto REDISCOVER_ENTRY;
+       }
+       if (force_data_sync) {
+               local_object = VME_OBJECT(entry);
                local_start = entry->vme_start;
+               local_offset = (vm_map_offset_t)VME_OFFSET(entry);
 
                vm_object_reference(local_object);
-               vm_map_unlock(map);
-
-               ret = vm_object_iopl_request(local_object, 
-                                             (vm_object_offset_t) ((offset - local_start) + local_offset),
-                                             *upl_size,
-                                             upl,
-                                             page_list,
-                                             count,
-                                             caller_flags);
+               vm_map_unlock_read(map);
+
+               vm_object_lock_request(local_object,
+                   ((vm_object_offset_t)
+                   ((offset - local_start) +
+                   local_offset)),
+                   (vm_object_size_t)*upl_size,
+                   FALSE,
+                   MEMORY_OBJECT_DATA_SYNC,
+                   VM_PROT_NO_CHANGE);
+
+               force_data_sync = FALSE;
                vm_object_deallocate(local_object);
 
-               return(ret);
-       } 
-       vm_map_unlock(map);
+               goto REDISCOVER_ENTRY;
+       }
+       if (VME_OBJECT(entry)->private) {
+               *flags = UPL_DEV_MEMORY;
+       } else {
+               *flags = 0;
+       }
+
+       if (VME_OBJECT(entry)->phys_contiguous) {
+               *flags |= UPL_PHYS_CONTIG;
+       }
+
+       local_object = VME_OBJECT(entry);
+       local_offset = (vm_map_offset_t)VME_OFFSET(entry);
+       local_start = entry->vme_start;
+
+       /*
+        * Wiring will copy the pages to the shadow object.
+        * The shadow object will not be code-signed so
+        * attempting to execute code from these copied pages
+        * would trigger a code-signing violation.
+        */
+       if (entry->protection & VM_PROT_EXECUTE) {
+#if MACH_ASSERT
+               printf("pid %d[%s] create_upl out of executable range from "
+                   "0x%llx to 0x%llx: side effects may include "
+                   "code-signing violations later on\n",
+                   proc_selfpid(),
+                   (current_task()->bsd_info
+                   ? proc_name_address(current_task()->bsd_info)
+                   : "?"),
+                   (uint64_t) entry->vme_start,
+                   (uint64_t) entry->vme_end);
+#endif /* MACH_ASSERT */
+               DTRACE_VM2(cs_executable_create_upl,
+                   uint64_t, (uint64_t)entry->vme_start,
+                   uint64_t, (uint64_t)entry->vme_end);
+               cs_executable_create_upl++;
+       }
+
+       vm_object_lock(local_object);
+
+       /*
+        * Ensure that this object is "true_share" and "copy_delay" now,
+        * while we're still holding the VM map lock.  After we unlock the map,
+        * anything could happen to that mapping, including some copy-on-write
+        * activity.  We need to make sure that the IOPL will point at the
+        * same memory as the mapping.
+        */
+       if (local_object->true_share) {
+               assert(local_object->copy_strategy !=
+                   MEMORY_OBJECT_COPY_SYMMETRIC);
+       } else if (local_object != kernel_object &&
+           local_object != compressor_object &&
+           !local_object->phys_contiguous) {
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+               if (!local_object->true_share &&
+                   vm_object_tracking_inited) {
+                       void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+                       int num = 0;
+                       num = OSBacktrace(bt,
+                           VM_OBJECT_TRACKING_BTDEPTH);
+                       btlog_add_entry(vm_object_tracking_btlog,
+                           local_object,
+                           VM_OBJECT_TRACKING_OP_TRUESHARE,
+                           bt,
+                           num);
+               }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+               local_object->true_share = TRUE;
+               if (local_object->copy_strategy ==
+                   MEMORY_OBJECT_COPY_SYMMETRIC) {
+                       local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+               }
+       }
+
+       vm_object_reference_locked(local_object);
+       vm_object_unlock(local_object);
+
+       vm_map_unlock_read(map);
+
+       offset += offset_in_mapped_page;
+       assert(*upl_size > offset_in_mapped_page);
+       *upl_size -= offset_in_mapped_page;
+
+       ret = vm_object_iopl_request(local_object,
+           ((vm_object_offset_t)
+           ((offset - local_start) + local_offset)),
+           *upl_size,
+           upl,
+           page_list,
+           count,
+           caller_flags,
+           tag);
+       vm_object_deallocate(local_object);
+
+done:
+       if (release_map) {
+               vm_map_deallocate(map);
+       }
 
-       return(KERN_FAILURE);
+       return ret;
 }
 
 /*
  * Internal routine to enter a UPL into a VM map.
- * 
+ *
  * JMM - This should just be doable through the standard
  * vm_map_enter() API.
  */
 kern_return_t
 vm_map_enter_upl(
-       vm_map_t                map, 
-       upl_t                   upl, 
-       vm_map_offset_t *dst_addr)
+       vm_map_t                map,
+       upl_t                   upl,
+       vm_map_offset_t         *dst_addr)
 {
-       vm_map_size_t           size;
-       vm_object_offset_t      offset;
-       vm_map_offset_t         addr;
-       vm_page_t               m;
-       kern_return_t           kr;
-
-       if (upl == UPL_NULL)
+       vm_map_size_t           size;
+       vm_object_offset_t      offset;
+       vm_map_offset_t         addr;
+       vm_page_t               m;
+       kern_return_t           kr;
+       int                     isVectorUPL = 0, curr_upl = 0;
+       upl_t                   vector_upl = NULL;
+       vm_offset_t             vector_upl_dst_addr = 0;
+       vm_map_t                vector_upl_submap = NULL;
+       upl_offset_t            subupl_offset = 0;
+       upl_size_t              subupl_size = 0;
+
+       if (upl == UPL_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       upl_lock(upl);
+       DEBUG4K_UPL("map %p upl %p flags 0x%x object %p offset 0x%llx size 0x%x \n", map, upl, upl->flags, upl->map_object, upl->u_offset, upl->u_size);
+       assert(map == kernel_map);
 
-       /*
-        * check to see if already mapped
-        */
-       if (UPL_PAGE_LIST_MAPPED & upl->flags) {
-               upl_unlock(upl);
-               return KERN_FAILURE;
+       if ((isVectorUPL = vector_upl_is_valid(upl))) {
+               int mapped = 0, valid_upls = 0;
+               vector_upl = upl;
+
+               upl_lock(vector_upl);
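+               /*
+                * Walk every sub-UPL slot: count how many slots are populated
+                * and how many of those are already mapped, so that a
+                * partially mapped vector UPL can be caught below.
+                */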
+               for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+                       upl =  vector_upl_subupl_byindex(vector_upl, curr_upl );
+                       if (upl == NULL) {
+                               continue;
+                       }
+                       valid_upls++;
+                       if (UPL_PAGE_LIST_MAPPED & upl->flags) {
+                               mapped++;
+                       }
+               }
+
+               if (mapped) {
+                       if (mapped != valid_upls) {
+                               panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
+                       } else {
+                               upl_unlock(vector_upl);
+                               return KERN_FAILURE;
+                       }
+               }
+
+               if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
+                       panic("TODO4K: vector UPL not implemented");
+               }
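+               /*
+                * Reserve one contiguous kernel submap covering the whole
+                * vector UPL; each sub-UPL is later entered at its own fixed
+                * offset inside that submap.
+                */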
+
+               kr = kmem_suballoc(map, &vector_upl_dst_addr,
+                   vector_upl->u_size,
+                   FALSE,
+                   VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
+                   &vector_upl_submap);
+               if (kr != KERN_SUCCESS) {
+                       panic("Vector UPL submap allocation failed\n");
+               }
+               map = vector_upl_submap;
+               vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
+               curr_upl = 0;
+       } else {
+               upl_lock(upl);
+       }
+
+process_upl_to_enter:
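+       /*
+        * For a vector UPL we come back to this label once per sub-UPL and
+        * map each one at its recorded offset within the submap; a plain
+        * UPL passes through exactly once.
+        */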
+       if (isVectorUPL) {
+               if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+                       *dst_addr = vector_upl_dst_addr;
+                       upl_unlock(vector_upl);
+                       return KERN_SUCCESS;
+               }
+               upl =  vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+               if (upl == NULL) {
+                       goto process_upl_to_enter;
+               }
+
+               vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
+               *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
+       } else {
+               /*
+                * check to see if already mapped
+                */
+               if (UPL_PAGE_LIST_MAPPED & upl->flags) {
+                       upl_unlock(upl);
+                       return KERN_FAILURE;
+               }
        }
 
-       if ((!(upl->flags & UPL_SHADOWED)) && !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) ||
-                                              (upl->map_object->phys_contiguous))) {
-               vm_object_t             object;
-               vm_page_t               alias_page;
-               vm_object_offset_t      new_offset;
-               int                     pg_num;
-               wpl_array_t             lite_list;
+       size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
+
+       if ((!(upl->flags & UPL_SHADOWED)) &&
+           ((upl->flags & UPL_HAS_BUSY) ||
+           !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
+               vm_object_t             object;
+               vm_page_t               alias_page;
+               vm_object_offset_t      new_offset;
+               unsigned int            pg_num;
+               wpl_array_t             lite_list;
 
                if (upl->flags & UPL_INTERNAL) {
-                       lite_list = (wpl_array_t) 
-                               ((((uintptr_t)upl) + sizeof(struct upl))
-                                + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+                       lite_list = (wpl_array_t)
+                           ((((uintptr_t)upl) + sizeof(struct upl))
+                           + ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
                } else {
-                       lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
+                       lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
                }
                object = upl->map_object;
-               upl->map_object = vm_object_allocate(upl->size);
+               upl->map_object = vm_object_allocate(vm_object_round_page(size));
 
                vm_object_lock(upl->map_object);
 
@@ -3761,71 +6989,62 @@ vm_map_enter_upl(
                upl->map_object->pageout = TRUE;
                upl->map_object->can_persist = FALSE;
                upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
-               upl->map_object->shadow_offset = upl->offset - object->paging_offset;
+               upl->map_object->vo_shadow_offset = upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset;
+               assertf(page_aligned(upl->map_object->vo_shadow_offset),
+                   "object %p shadow_offset 0x%llx",
+                   upl->map_object,
+                   (uint64_t)upl->map_object->vo_shadow_offset);
                upl->map_object->wimg_bits = object->wimg_bits;
-               offset = upl->map_object->shadow_offset;
+               assertf(page_aligned(upl->map_object->vo_shadow_offset),
+                   "object %p shadow_offset 0x%llx",
+                   upl->map_object, upl->map_object->vo_shadow_offset);
+               offset = upl->map_object->vo_shadow_offset;
                new_offset = 0;
-               size = upl->size;
+               size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
 
                upl->flags |= UPL_SHADOWED;
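+               /*
+                * Walk the UPL's lite list one page at a time: bit
+                * (pg_num & 31) of word (pg_num >> 5) is set for each page
+                * the UPL covers.  For every such page, wire a fictitious
+                * alias page into the new shadow map_object that shares the
+                * original page's physical frame.
+                */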
 
                while (size) {
-                       pg_num = (new_offset)/PAGE_SIZE;
-
-                       if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+                       pg_num = (unsigned int) (new_offset / PAGE_SIZE);
+                       assert(pg_num == new_offset / PAGE_SIZE);
 
+                       if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
                                VM_PAGE_GRAB_FICTITIOUS(alias_page);
 
                                vm_object_lock(object);
 
                                m = vm_page_lookup(object, offset);
                                if (m == VM_PAGE_NULL) {
-                                       panic("vm_upl_map: page missing\n");
+                                       panic("vm_upl_map: page missing\n");
                                }
 
                                /*
-                                * Convert the fictitious page to a private 
+                                * Convert the fictitious page to a private
                                 * shadow of the real page.
                                 */
-                               assert(alias_page->fictitious);
-                               alias_page->fictitious = FALSE;
-                               alias_page->private = TRUE;
-                               alias_page->pageout = TRUE;
+                               assert(alias_page->vmp_fictitious);
+                               alias_page->vmp_fictitious = FALSE;
+                               alias_page->vmp_private = TRUE;
+                               alias_page->vmp_free_when_done = TRUE;
                                /*
                                 * since m is a page in the upl it must
                                 * already be wired or BUSY, so it's
                                 * safe to assign the underlying physical
                                 * page to the alias
                                 */
-                               alias_page->phys_page = m->phys_page;
+                               VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
 
-                               vm_object_unlock(object);
+                               vm_object_unlock(object);
 
                                vm_page_lockspin_queues();
-                               vm_page_wire(alias_page);
+                               vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
                                vm_page_unlock_queues();
-                               
-                               /*
-                                * ENCRYPTED SWAP:
-                                * The virtual page ("m") has to be wired in some way
-                                * here or its physical page ("m->phys_page") could
-                                * be recycled at any time.
-                                * Assuming this is enforced by the caller, we can't
-                                * get an encrypted page here.  Since the encryption
-                                * key depends on the VM page's "pager" object and
-                                * the "paging_offset", we couldn't handle 2 pageable
-                                * VM pages (with different pagers and paging_offsets)
-                                * sharing the same physical page:  we could end up
-                                * encrypting with one key (via one VM page) and
-                                * decrypting with another key (via the alias VM page).
-                                */
-                               ASSERT_PAGE_DECRYPTED(m);
 
-                               vm_page_insert(alias_page, upl->map_object, new_offset);
+                               vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
 
-                               assert(!alias_page->wanted);
-                               alias_page->busy = FALSE;
-                               alias_page->absent = FALSE;
+                               assert(!alias_page->vmp_wanted);
+                               alias_page->vmp_busy = FALSE;
+                               alias_page->vmp_absent = FALSE;
                        }
                        size -= PAGE_SIZE;
                        offset += PAGE_SIZE_64;
@@ -3833,25 +7052,39 @@ vm_map_enter_upl(
                }
                vm_object_unlock(upl->map_object);
        }
-       if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || upl->map_object->phys_contiguous)
-               offset = upl->offset - upl->map_object->paging_offset;
-       else
-               offset = 0;
-       size = upl->size;
-       
+       if (upl->flags & UPL_SHADOWED) {
+               offset = 0;
+       } else {
+               offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) - upl->map_object->paging_offset;
+       }
+
+       size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
+
        vm_object_reference(upl->map_object);
 
-       *dst_addr = 0;
-       /*
-        * NEED A UPL_MAP ALIAS
-        */
-       kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
-                         VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
-                         VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+       if (!isVectorUPL) {
+               *dst_addr = 0;
+               /*
+                * NEED A UPL_MAP ALIAS
+                */
+               kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+                   VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
+                   upl->map_object, offset, FALSE,
+                   VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 
-       if (kr != KERN_SUCCESS) {
-               upl_unlock(upl);
-               return(kr);
+               if (kr != KERN_SUCCESS) {
+                       vm_object_deallocate(upl->map_object);
+                       upl_unlock(upl);
+                       return kr;
+               }
+       } else {
+               kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+                   VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
+                   upl->map_object, offset, FALSE,
+                   VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+               if (kr) {
+                       panic("vm_map_enter failed for a Vector UPL\n");
+               }
        }
        vm_object_lock(upl->map_object);
 
@@ -3859,13 +7092,20 @@ vm_map_enter_upl(
                m = vm_page_lookup(upl->map_object, offset);
 
                if (m) {
-                       unsigned int    cache_attr;
-                       cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+                       m->vmp_pmapped = TRUE;
+
+                       /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
+                        * but only in kernel space. If this was on a user map,
+                        * we'd have to set the wpmapped bit. */
+                       /* m->vmp_wpmapped = TRUE; */
+                       assert(map->pmap == kernel_pmap);
 
-                       m->pmapped = TRUE;
-                       m->wpmapped = TRUE;
-       
-                       PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, cache_attr, TRUE);
+                       PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE, kr);
+
+                       assert(kr == KERN_SUCCESS);
+#if KASAN
+                       kasan_notify_address(addr, PAGE_SIZE_64);
+#endif
                }
                offset += PAGE_SIZE_64;
        }
@@ -3876,12 +7116,29 @@ vm_map_enter_upl(
         */
        upl->ref_count++;
        upl->flags |= UPL_PAGE_LIST_MAPPED;
-       upl->kaddr = *dst_addr;
+       upl->kaddr = (vm_offset_t) *dst_addr;
+       assert(upl->kaddr == *dst_addr);
+
+       if (isVectorUPL) {
+               goto process_upl_to_enter;
+       }
+
+       if (!isVectorUPL) {
+               vm_map_offset_t addr_adjustment;
+
+               addr_adjustment = (vm_map_offset_t)(upl->u_offset - upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)));
+               if (addr_adjustment) {
+                       assert(VM_MAP_PAGE_MASK(map) != PAGE_MASK);
+                       DEBUG4K_UPL("dst_addr 0x%llx (+ 0x%llx) -> 0x%llx\n", (uint64_t)*dst_addr, (uint64_t)addr_adjustment, (uint64_t)(*dst_addr + addr_adjustment));
+                       *dst_addr += addr_adjustment;
+               }
+       }
+
        upl_unlock(upl);
 
        return KERN_SUCCESS;
 }
-       
+
 /*
  * Internal routine to remove a UPL mapping from a VM map.
  *
@@ -3894,129 +7151,259 @@ vm_map_enter_upl(
  */
 kern_return_t
 vm_map_remove_upl(
-       vm_map_t        map, 
-       upl_t           upl)
+       vm_map_t        map,
+       upl_t           upl)
 {
-       vm_address_t    addr;
-       upl_size_t      size;
+       vm_address_t    addr;
+       upl_size_t      size;
+       int             isVectorUPL = 0, curr_upl = 0;
+       upl_t           vector_upl = NULL;
 
-       if (upl == UPL_NULL)
+       if (upl == UPL_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       upl_lock(upl);
+       if ((isVectorUPL = vector_upl_is_valid(upl))) {
+               int     unmapped = 0, valid_upls = 0;
+               vector_upl = upl;
+               upl_lock(vector_upl);
+               for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+                       upl =  vector_upl_subupl_byindex(vector_upl, curr_upl );
+                       if (upl == NULL) {
+                               continue;
+                       }
+                       valid_upls++;
+                       if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) {
+                               unmapped++;
+                       }
+               }
+
+               if (unmapped) {
+                       if (unmapped != valid_upls) {
+                               panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
+                       } else {
+                               upl_unlock(vector_upl);
+                               return KERN_FAILURE;
+                       }
+               }
+               curr_upl = 0;
+       } else {
+               upl_lock(upl);
+       }
+
+process_upl_to_remove:
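+       /*
+        * For a vector UPL, loop back to this label once per sub-UPL; the
+        * individual mappings live in a shared submap that is torn down in
+        * one shot after the last sub-UPL has been processed.
+        */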
+       if (isVectorUPL) {
+               if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+                       vm_map_t v_upl_submap;
+                       vm_offset_t v_upl_submap_dst_addr;
+                       vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
+
+                       vm_map_remove(map, v_upl_submap_dst_addr,
+                           v_upl_submap_dst_addr + vector_upl->u_size,
+                           VM_MAP_REMOVE_NO_FLAGS);
+                       vm_map_deallocate(v_upl_submap);
+                       upl_unlock(vector_upl);
+                       return KERN_SUCCESS;
+               }
+
+               upl =  vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+               if (upl == NULL) {
+                       goto process_upl_to_remove;
+               }
+       }
 
        if (upl->flags & UPL_PAGE_LIST_MAPPED) {
                addr = upl->kaddr;
-               size = upl->size;
+               size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
 
                assert(upl->ref_count > 1);
-               upl->ref_count--;               /* removing mapping ref */
+               upl->ref_count--;               /* removing mapping ref */
 
                upl->flags &= ~UPL_PAGE_LIST_MAPPED;
                upl->kaddr = (vm_offset_t) 0;
-               upl_unlock(upl);
 
-               vm_map_remove(map,
-                             vm_map_trunc_page(addr),
-                             vm_map_round_page(addr + size),
-                             VM_MAP_NO_FLAGS);
+               if (!isVectorUPL) {
+                       upl_unlock(upl);
 
-               return KERN_SUCCESS;
+                       vm_map_remove(
+                               map,
+                               vm_map_trunc_page(addr,
+                               VM_MAP_PAGE_MASK(map)),
+                               vm_map_round_page(addr + size,
+                               VM_MAP_PAGE_MASK(map)),
+                               VM_MAP_REMOVE_NO_FLAGS);
+                       return KERN_SUCCESS;
+               } else {
+                       /*
+                        * If it's a Vectored UPL, we'll be removing the entire
+                        * submap anyway, so there is no need to remove individual UPL
+                        * element mappings from within the submap
+                        */
+                       goto process_upl_to_remove;
+               }
        }
        upl_unlock(upl);
 
        return KERN_FAILURE;
 }
 
+
 kern_return_t
 upl_commit_range(
-       upl_t                   upl, 
-       upl_offset_t            offset, 
-       upl_size_t              size,
-       int                     flags,
-       upl_page_info_t         *page_list,
-       mach_msg_type_number_t  count,
-       boolean_t               *empty) 
+       upl_t                   upl,
+       upl_offset_t            offset,
+       upl_size_t              size,
+       int                     flags,
+       upl_page_info_t         *page_list,
+       mach_msg_type_number_t  count,
+       boolean_t               *empty)
 {
-       upl_size_t              xfer_size;
-       vm_object_t             shadow_object;
-       vm_object_t             object;
-       vm_object_offset_t      target_offset;
-       int                     entry;
-       wpl_array_t             lite_list;
-       int                     occupied;
-       int                     delayed_unlock = 0;
-       int                     clear_refmod = 0;
-       int                     pgpgout_count = 0;
-       int                     j;
-
+       upl_size_t              xfer_size, subupl_size;
+       vm_object_t             shadow_object;
+       vm_object_t             object;
+       vm_object_t             m_object;
+       vm_object_offset_t      target_offset;
+       upl_offset_t            subupl_offset = offset;
+       int                     entry;
+       wpl_array_t             lite_list;
+       int                     occupied;
+       int                     clear_refmod = 0;
+       int                     pgpgout_count = 0;
+       struct  vm_page_delayed_work    dw_array;
+       struct  vm_page_delayed_work    *dwp, *dwp_start;
+       bool                    dwp_finish_ctx = TRUE;
+       int                     dw_count;
+       int                     dw_limit;
+       int                     isVectorUPL = 0;
+       upl_t                   vector_upl = NULL;
+       boolean_t               should_be_throttled = FALSE;
+
+       vm_page_t               nxt_page = VM_PAGE_NULL;
+       int                     fast_path_possible = 0;
+       int                     fast_path_full_commit = 0;
+       int                     throttle_page = 0;
+       int                     unwired_count = 0;
+       int                     local_queue_count = 0;
+       vm_page_t               first_local, last_local;
+       vm_object_offset_t      obj_start, obj_end, obj_offset;
+       kern_return_t           kr = KERN_SUCCESS;
+
+//     DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
+
+       dwp_start = dwp = NULL;
+
+       subupl_size = size;
        *empty = FALSE;
 
-       if (upl == UPL_NULL)
+       if (upl == UPL_NULL) {
+       if (upl == UPL_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
+
+       dw_count = 0;
+       dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
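+       /*
+        * Try to grab a preallocated delayed-work context; if none is
+        * available, fall back to a single on-stack entry, which flushes
+        * the delayed work after every page.
+        */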
+       dwp_start = vm_page_delayed_work_get_ctx();
+       if (dwp_start == NULL) {
+               dwp_start = &dw_array;
+               dw_limit = 1;
+               dwp_finish_ctx = FALSE;
+       }
+
+       dwp = dwp_start;
 
-       if (count == 0)
+       if (count == 0) {
                page_list = NULL;
+       }
 
-       if (upl->flags & UPL_DEVICE_MEMORY)
-               xfer_size = 0;
-       else if ((offset + size) <= upl->size)
-               xfer_size = size;
-       else
-               return KERN_FAILURE;
+       if ((isVectorUPL = vector_upl_is_valid(upl))) {
+               vector_upl = upl;
+               upl_lock(vector_upl);
+       } else {
+               upl_lock(upl);
+       }
 
-       upl_lock(upl);
+process_upl_to_commit:
 
-       if (upl->flags & UPL_ACCESS_BLOCKED) {
-               /*
-                * We used this UPL to block access to the pages by marking
-                * them "busy".  Now we need to clear the "busy" bit to allow
-                * access to these pages again.
-                */
-               flags |= UPL_COMMIT_ALLOW_ACCESS;
+       if (isVectorUPL) {
+               size = subupl_size;
+               offset = subupl_offset;
+               if (size == 0) {
+                       upl_unlock(vector_upl);
+                       kr = KERN_SUCCESS;
+                       goto done;
+               }
+               upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+               if (upl == NULL) {
+                       upl_unlock(vector_upl);
+                       kr = KERN_FAILURE;
+                       goto done;
+               }
+               page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
+               subupl_size -= size;
+               subupl_offset += size;
+       }
+
+#if UPL_DEBUG
+       if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+               (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+
+               upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+               upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
+
+               upl->upl_commit_index++;
+       }
+#endif
+       if (upl->flags & UPL_DEVICE_MEMORY) {
+               xfer_size = 0;
+       } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
+               xfer_size = size;
+       } else {
+               if (!isVectorUPL) {
+                       upl_unlock(upl);
+               } else {
+                       upl_unlock(vector_upl);
+               }
+               DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
+               kr = KERN_FAILURE;
+               goto done;
+       }
+       if (upl->flags & UPL_SET_DIRTY) {
+               flags |= UPL_COMMIT_SET_DIRTY;
+       }
+       if (upl->flags & UPL_CLEAR_DIRTY) {
+               flags |= UPL_COMMIT_CLEAR_DIRTY;
        }
-       if (upl->flags & UPL_CLEAR_DIRTY)
-               flags |= UPL_COMMIT_CLEAR_DIRTY;
 
-       if (upl->flags & UPL_INTERNAL)
+       if (upl->flags & UPL_INTERNAL) {
                lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
-                                          + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
-       else
+                   + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t)));
+       } else {
                lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+       }
 
        object = upl->map_object;
 
        if (upl->flags & UPL_SHADOWED) {
-               vm_object_lock(object);
+               vm_object_lock(object);
                shadow_object = object->shadow;
        } else {
                shadow_object = object;
        }
-       entry = offset/PAGE_SIZE;
+       entry = offset / PAGE_SIZE;
        target_offset = (vm_object_offset_t)offset;
 
-       /*
-        * pageout_scan takes the vm_page_lock_queues first
-        * then tries for the object lock... to avoid what
-        * is effectively a lock inversion, we'll go to the
-        * trouble of taking them in that same order... otherwise
-        * if this object contains the majority of the pages resident
-        * in the UBC (or a small set of large objects actively being
-        * worked on contain the majority of the pages), we could
-        * cause the pageout_scan thread to 'starve' in its attempt
-        * to find pages to move to the free queue, since it has to
-        * successfully acquire the object lock of any candidate page
-        * before it can steal/clean it.
-        */
-       for (j = 0; ; j++) {
-               vm_page_lock_queues();
+       if (upl->flags & UPL_KERNEL_OBJECT) {
+               vm_object_lock_shared(shadow_object);
+       } else {
+               vm_object_lock(shadow_object);
+       }
 
-               if (vm_object_lock_try(shadow_object))
-                       break;
-               vm_page_unlock_queues();
-               mutex_pause(j);
+       VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
+
+       if (upl->flags & UPL_ACCESS_BLOCKED) {
+               assert(shadow_object->blocked_access);
+               shadow_object->blocked_access = FALSE;
+               vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
        }
-       delayed_unlock = 1;
 
        if (shadow_object->code_signed) {
                /*
@@ -4028,45 +7415,97 @@ upl_commit_range(
                 */
                flags &= ~UPL_COMMIT_CS_VALIDATED;
        }
-       if (! page_list) {
+       if (!page_list) {
                /*
                 * No page list to get the code-signing info from !?
                 */
                flags &= ~UPL_COMMIT_CS_VALIDATED;
        }
+       if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
+               should_be_throttled = TRUE;
+       }
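+       /*
+        * Fast path: for an IO-wire commit that is not on a vector UPL, is
+        * not freeing absent pages, and targets an object that is neither
+        * volatile nor empty purgeable, newly unwired pages are collected on
+        * a local list and have their target queue recorded directly; a
+        * commit covering the whole object can also walk the resident page
+        * list instead of looking up each page.
+        */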
 
-       while (xfer_size) {
-               vm_page_t       t, m;
+       if ((upl->flags & UPL_IO_WIRE) &&
+           !(flags & UPL_COMMIT_FREE_ABSENT) &&
+           !isVectorUPL &&
+           shadow_object->purgable != VM_PURGABLE_VOLATILE &&
+           shadow_object->purgable != VM_PURGABLE_EMPTY) {
+               if (!vm_page_queue_empty(&shadow_object->memq)) {
+                       if (size == shadow_object->vo_size) {
+                               nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
+                               fast_path_full_commit = 1;
+                       }
+                       fast_path_possible = 1;
+
+                       if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
+                           (shadow_object->purgable == VM_PURGABLE_DENY ||
+                           shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
+                           shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
+                               throttle_page = 1;
+                       }
+               }
+       }
+       first_local = VM_PAGE_NULL;
+       last_local = VM_PAGE_NULL;
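+       /*
+        * The commit range is expressed in UPL offsets; translate it into
+        * page-aligned offsets within the shadow object before walking it.
+        */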
+
+       obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
+       obj_end = obj_start + xfer_size;
+       obj_start = vm_object_trunc_page(obj_start);
+       obj_end = vm_object_round_page(obj_end);
+       for (obj_offset = obj_start;
+           obj_offset < obj_end;
+           obj_offset += PAGE_SIZE) {
+               vm_page_t       t, m;
+
+               dwp->dw_mask = 0;
+               clear_refmod = 0;
 
                m = VM_PAGE_NULL;
 
                if (upl->flags & UPL_LITE) {
-                       int     pg_num;
+                       unsigned int    pg_num;
 
-                       pg_num = target_offset/PAGE_SIZE;
+                       if (nxt_page != VM_PAGE_NULL) {
+                               m = nxt_page;
+                               nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
+                               target_offset = m->vmp_offset;
+                       }
+                       pg_num = (unsigned int) (target_offset / PAGE_SIZE);
+                       assert(pg_num == target_offset / PAGE_SIZE);
 
-                       if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
-                               lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
+                       if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
+                               lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
 
-                               m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
+                               if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
+                                       m = vm_page_lookup(shadow_object, obj_offset);
+                               }
+                       } else {
+                               m = NULL;
                        }
                }
                if (upl->flags & UPL_SHADOWED) {
-                       if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+                       if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+                               t->vmp_free_when_done = FALSE;
 
-                               t->pageout = FALSE;
+                               VM_PAGE_FREE(t);
 
-                               vm_page_free(t);
-
-                               if (m == VM_PAGE_NULL)
-                                       m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
+                               if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
+                                       m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
+                               }
                        }
                }
                if (m == VM_PAGE_NULL) {
                        goto commit_next_page;
                }
 
-               clear_refmod = 0;
+               m_object = VM_PAGE_OBJECT(m);
+
+               if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+                       assert(m->vmp_busy);
+
+                       dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                       goto commit_next_page;
+               }
 
                if (flags & UPL_COMMIT_CS_VALIDATED) {
                        /*
@@ -4074,22 +7513,27 @@ upl_commit_range(
                         * Set the code signing bits according to
                         * what the UPL says they should be.
                         */
-                       m->cs_validated = page_list[entry].cs_validated;
-                       m->cs_tainted = page_list[entry].cs_tainted;
+                       m->vmp_cs_validated |= page_list[entry].cs_validated;
+                       m->vmp_cs_tainted |= page_list[entry].cs_tainted;
+                       m->vmp_cs_nx |= page_list[entry].cs_nx;
+               }
+               if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
+                       m->vmp_written_by_kernel = TRUE;
                }
-               if (upl->flags & UPL_IO_WIRE) {
-
-                       vm_page_unwire(m);
 
-                       if (page_list)
+               if (upl->flags & UPL_IO_WIRE) {
+                       if (page_list) {
                                page_list[entry].phys_addr = 0;
+                       }
 
-                       if (flags & UPL_COMMIT_SET_DIRTY)
-                               m->dirty = TRUE;
-                       else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
-                               m->dirty = FALSE;
-                               if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
-                                   m->cs_validated && !m->cs_tainted) {
+                       if (flags & UPL_COMMIT_SET_DIRTY) {
+                               SET_PAGE_DIRTY(m, FALSE);
+                       } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
+                               m->vmp_dirty = FALSE;
+
+                               if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
+                                   m->vmp_cs_validated &&
+                                   m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
                                        /*
                                         * CODE SIGNING:
                                         * This page is no longer dirty
@@ -4097,28 +7541,110 @@ upl_commit_range(
                                         * so it will need to be
                                         * re-validated.
                                         */
-                                       m->cs_validated = FALSE;
-                                       vm_cs_validated_resets++;
+                                       m->vmp_cs_validated = VMP_CS_ALL_FALSE;
+
+                                       VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
+
+                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
                                }
                                clear_refmod |= VM_MEM_MODIFIED;
                        }
-                       
-                       if (flags & UPL_COMMIT_INACTIVATE)
-                               vm_page_deactivate(m);
+                       if (upl->flags & UPL_ACCESS_BLOCKED) {
+                               /*
+                                * We blocked access to the pages in this UPL.
+                                * Clear the "busy" bit and wake up any waiter
+                                * for this page.
+                                */
+                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                       }
+                       if (fast_path_possible) {
+                               assert(m_object->purgable != VM_PURGABLE_EMPTY);
+                               assert(m_object->purgable != VM_PURGABLE_VOLATILE);
+                               if (m->vmp_absent) {
+                                       assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
+                                       assert(m->vmp_wire_count == 0);
+                                       assert(m->vmp_busy);
+
+                                       m->vmp_absent = FALSE;
+                                       dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                               } else {
+                                       if (m->vmp_wire_count == 0) {
+                                               panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object);
+                                       }
+                                       assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
+
+                                       /*
+                                        * XXX FBDP need to update some other
+                                        * counters here (purgeable_wired_count)
+                                        * (ledgers), ...
+                                        */
+                                       assert(m->vmp_wire_count > 0);
+                                       m->vmp_wire_count--;
+
+                                       if (m->vmp_wire_count == 0) {
+                                               m->vmp_q_state = VM_PAGE_NOT_ON_Q;
+                                               unwired_count++;
+                                       }
+                               }
+                               if (m->vmp_wire_count == 0) {
+                                       assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
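+                                       /*
+                                        * Link the newly unwired page onto the
+                                        * local list headed by first_local;
+                                        * vmp_q_state below records which queue
+                                        * it should end up on.
+                                        */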
+
+                                       if (last_local == VM_PAGE_NULL) {
+                                               assert(first_local == VM_PAGE_NULL);
+
+                                               last_local = m;
+                                               first_local = m;
+                                       } else {
+                                               assert(first_local != VM_PAGE_NULL);
+
+                                               m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
+                                               first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
+                                               first_local = m;
+                                       }
+                                       local_queue_count++;
 
-                       if (clear_refmod)
-                               pmap_clear_refmod(m->phys_page, clear_refmod);
+                                       if (throttle_page) {
+                                               m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
+                                       } else {
+                                               if (flags & UPL_COMMIT_INACTIVATE) {
+                                                       if (shadow_object->internal) {
+                                                               m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
+                                                       } else {
+                                                               m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
+                                                       }
+                                               } else {
+                                                       m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
+                                               }
+                                       }
+                               }
+                       } else {
+                               if (flags & UPL_COMMIT_INACTIVATE) {
+                                       dwp->dw_mask |= DW_vm_page_deactivate_internal;
+                                       clear_refmod |= VM_MEM_REFERENCED;
+                               }
+                               if (m->vmp_absent) {
+                                       if (flags & UPL_COMMIT_FREE_ABSENT) {
+                                               dwp->dw_mask |= DW_vm_page_free;
+                                       } else {
+                                               m->vmp_absent = FALSE;
+                                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
 
-                       if (flags & UPL_COMMIT_ALLOW_ACCESS) {
-                               /*
-                                * We blocked access to the pages in this UPL.
-                                * Clear the "busy" bit and wake up any waiter
-                                * for this page.
-                                */
-                               PAGE_WAKEUP_DONE(m);
+                                               if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
+                                                       dwp->dw_mask |= DW_vm_page_activate;
+                                               }
+                                       }
+                               } else {
+                                       dwp->dw_mask |= DW_vm_page_unwire;
+                               }
                        }
                        goto commit_next_page;
                }
+               assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
+
+               if (page_list) {
+                       page_list[entry].phys_addr = 0;
+               }
+
                /*
                 * make sure to clear the hardware
                 * modify or reference bits before
@@ -4127,167 +7653,112 @@ upl_commit_range(
                 * change of state
                 */
                if (flags & UPL_COMMIT_CLEAR_DIRTY) {
-                       m->dirty = FALSE;
+                       m->vmp_dirty = FALSE;
 
-                       if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
-                           m->cs_validated && !m->cs_tainted) {
-                               /*
-                                * CODE SIGNING:
-                                * This page is no longer dirty
-                                * but could have been modified,
-                                * so it will need to be
-                                * re-validated.
-                                */
-                               m->cs_validated = FALSE;
-#if DEVELOPMENT || DEBUG
-                               vm_cs_validated_resets++;
-#endif
-                       }
                        clear_refmod |= VM_MEM_MODIFIED;
                }
-               if (clear_refmod)
-                       pmap_clear_refmod(m->phys_page, clear_refmod);
+               if (m->vmp_laundry) {
+                       dwp->dw_mask |= DW_vm_pageout_throttle_up;
+               }
 
-               if (page_list) {
-                       upl_page_info_t *p;
-
-                       p = &(page_list[entry]);
-                       
-                       if (p->phys_addr && p->pageout && !m->pageout) {
-                               m->busy = TRUE;
-                               m->pageout = TRUE;
-                               vm_page_wire(m);
-                       } else if (p->phys_addr &&
-                                  !p->pageout && m->pageout &&
-                                  !m->dump_cleaning) {
-                               m->pageout = FALSE;
-                               m->absent = FALSE;
-                               m->overwriting = FALSE;
-                               vm_page_unwire(m);
-                               
-                               PAGE_WAKEUP_DONE(m);
-                       }
-                       page_list[entry].phys_addr = 0;
+               if (VM_PAGE_WIRED(m)) {
+                       m->vmp_free_when_done = FALSE;
                }
-               m->dump_cleaning = FALSE;
 
-               if (m->laundry)
-                       vm_pageout_throttle_up(m);
+               if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
+                   m->vmp_cs_validated &&
+                   m->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
+                       /*
+                        * CODE SIGNING:
+                        * This page is no longer dirty
+                        * but could have been modified,
+                        * so it will need to be
+                        * re-validated.
+                        */
+                       m->vmp_cs_validated = VMP_CS_ALL_FALSE;
+
+                       VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
 
-               if (m->pageout) {
-                       m->cleaning = FALSE;
-                       m->encrypted_cleaning = FALSE;
-                       m->pageout = FALSE;
-#if MACH_CLUSTER_STATS
-                       if (m->wanted) vm_pageout_target_collisions++;
+                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+               }
+               if (m->vmp_overwriting) {
+                       /*
+                        * the (COPY_OUT_FROM == FALSE) request_page_list case
+                        */
+                       if (m->vmp_busy) {
+#if CONFIG_PHANTOM_CACHE
+                               if (m->vmp_absent && !m_object->internal) {
+                                       dwp->dw_mask |= DW_vm_phantom_cache_update;
+                               }
 #endif
-                       m->dirty = FALSE;
-                       
-                       if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
-                           m->cs_validated && !m->cs_tainted) {
+                               m->vmp_absent = FALSE;
+
+                               dwp->dw_mask |= DW_clear_busy;
+                       } else {
                                /*
-                                * CODE SIGNING:
-                                * This page is no longer dirty
-                                * but could have been modified,
-                                * so it will need to be
-                                * re-validated.
+                                * alternate (COPY_OUT_FROM == FALSE) page_list case
+                                * Occurs when the original page was wired
+                                * at the time of the list request
                                 */
-                               m->cs_validated = FALSE;
-#if DEVELOPMENT || DEBUG
-                               vm_cs_validated_resets++;
-#endif
+                               assert(VM_PAGE_WIRED(m));
+
+                               dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
                        }
-                       
-                       if (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED))
-                               m->dirty = TRUE;
-                       
-                       if (m->dirty) {
+                       m->vmp_overwriting = FALSE;
+               }
+               m->vmp_cleaning = FALSE;
+
+               if (m->vmp_free_when_done) {
+                       /*
+                        * With the clean queue enabled, UPL_PAGEOUT should
+                        * no longer set the pageout bit. Its pages now go
+                        * to the clean queue.
+                        *
+                        * We don't use the cleaned Q anymore and so this
+                        * assert isn't correct. The code for the clean Q
+                        * still exists and might be used in the future. If we
+                        * go back to the cleaned Q, we will re-enable this
+                        * assert.
+                        *
+                        * assert(!(upl->flags & UPL_PAGEOUT));
+                        */
+                       assert(!m_object->internal);
+
+                       m->vmp_free_when_done = FALSE;
+
+                       if ((flags & UPL_COMMIT_SET_DIRTY) ||
+                           (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
                                /*
                                 * page was re-dirtied after we started
-                                * the pageout... reactivate it since 
+                                * the pageout... reactivate it since
                                 * we don't know whether the on-disk
                                 * copy matches what is now in memory
                                 */
-                               vm_page_unwire(m);
-                               
+                               SET_PAGE_DIRTY(m, FALSE);
+
+                               dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
+
                                if (upl->flags & UPL_PAGEOUT) {
-                                       CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
                                        VM_STAT_INCR(reactivations);
                                        DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
                                }
-                               PAGE_WAKEUP_DONE(m);
                        } else {
                                /*
                                 * page has been successfully cleaned
                                 * go ahead and free it for other use
                                 */
-                               
-                               if (m->object->internal) {
+                               if (m_object->internal) {
                                        DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
                                } else {
                                        DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
                                }
-                               
-                               vm_page_free(m);
-                               
-                               if (upl->flags & UPL_PAGEOUT) {
-                                       CLUSTER_STAT(vm_pageout_target_page_freed++;)
-                                       
-                                       if (page_list[entry].dirty) {
-                                               VM_STAT_INCR(pageouts);
-                                               DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
-                                               pgpgout_count++;
-                                       }
-                               }
+                               m->vmp_dirty = FALSE;
+                               m->vmp_busy = TRUE;
+
+                               dwp->dw_mask |= DW_vm_page_free;
                        }
                        goto commit_next_page;
                }
-#if MACH_CLUSTER_STATS
-               if (m->wpmapped)
-                       m->dirty = pmap_is_modified(m->phys_page);
-
-               if (m->dirty)   vm_pageout_cluster_dirtied++;
-               else            vm_pageout_cluster_cleaned++;
-               if (m->wanted)  vm_pageout_cluster_collisions++;
-#endif
-               m->dirty = FALSE;
-
-               if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
-                   m->cs_validated && !m->cs_tainted) {
-                       /*
-                        * CODE SIGNING:
-                        * This page is no longer dirty
-                        * but could have been modified,
-                        * so it will need to be
-                        * re-validated.
-                        */
-                       m->cs_validated = FALSE;
-#if DEVELOPMENT || DEBUG
-                       vm_cs_validated_resets++;
-#endif
-               }
-
-               if ((m->busy) && (m->cleaning)) {
-                       /*
-                        * the request_page_list case
-                        */
-                       m->absent = FALSE;
-                       m->overwriting = FALSE;
-                       m->busy = FALSE;
-               } else if (m->overwriting) {
-                       /*
-                        * alternate request page list, write to 
-                        * page_list case.  Occurs when the original
-                        * page was wired at the time of the list
-                        * request
-                        */
-                       assert(m->wire_count != 0);
-                       vm_page_unwire(m);/* reactivates */
-                       m->overwriting = FALSE;
-               }
-               m->cleaning = FALSE;
-               m->encrypted_cleaning = FALSE;
-               
                /*
                 * It is a part of the semantic of COPYOUT_FROM
                 * UPLs that a commit implies cache sync
@@ -4295,354 +7766,619 @@ upl_commit_range(
                 * this can be used to strip the precious bit
                 * as well as clean
                 */
-               if (upl->flags & UPL_PAGE_SYNC_DONE)
-                       m->precious = FALSE;
-               
-               if (flags & UPL_COMMIT_SET_DIRTY)
-                       m->dirty = TRUE;
-               
-               if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
-                       vm_page_deactivate(m);
-               } else if (!m->active && !m->inactive && !m->speculative) {
-                       
-                       if (m->clustered)
-                               vm_page_speculate(m, TRUE);
-                       else if (m->reference)
-                               vm_page_activate(m);
-                       else
-                               vm_page_deactivate(m);
+               if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
+                       m->vmp_precious = FALSE;
+               }
+
+               if (flags & UPL_COMMIT_SET_DIRTY) {
+                       SET_PAGE_DIRTY(m, FALSE);
+               } else {
+                       m->vmp_dirty = FALSE;
+               }
+
+               /* with the clean queue on, move *all* cleaned pages to the clean queue */
+               if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
+                       pgpgout_count++;
+
+                       VM_STAT_INCR(pageouts);
+                       DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
+
+                       dwp->dw_mask |= DW_enqueue_cleaned;
+               } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
+                       /*
+                        * page coming back in from being 'frozen'...
+                        * it was dirty before it was frozen, so keep it so
+                        * the vm_page_activate will notice that it really belongs
+                        * on the throttle queue and put it there
+                        */
+                       SET_PAGE_DIRTY(m, FALSE);
+                       dwp->dw_mask |= DW_vm_page_activate;
+               } else {
+                       if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
+                               dwp->dw_mask |= DW_vm_page_deactivate_internal;
+                               clear_refmod |= VM_MEM_REFERENCED;
+                       } else if (!VM_PAGE_PAGEABLE(m)) {
+                               if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
+                                       dwp->dw_mask |= DW_vm_page_speculate;
+                               } else if (m->vmp_reference) {
+                                       dwp->dw_mask |= DW_vm_page_activate;
+                               } else {
+                                       dwp->dw_mask |= DW_vm_page_deactivate_internal;
+                                       clear_refmod |= VM_MEM_REFERENCED;
+                               }
+                       }
                }
-               if (flags & UPL_COMMIT_ALLOW_ACCESS) {
+               if (upl->flags & UPL_ACCESS_BLOCKED) {
                        /*
                         * We blocked access to the pages in this UPL.
                         * Clear the "busy" bit on this page before we
                         * wake up any waiter.
                         */
-                       m->busy = FALSE;
+                       dwp->dw_mask |= DW_clear_busy;
                }
                /*
                 * Wake up any thread waiting for the page to finish cleaning.
                 */
-               PAGE_WAKEUP(m);
+               dwp->dw_mask |= DW_PAGE_WAKEUP;
 
 commit_next_page:
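+               /*
+                * If we decided to clear the reference/modify state for
+                * this page, apply it to the pmap before moving on to
+                * the next UPL entry.
+                */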
+               if (clear_refmod) {
+                       pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
+               }
+
                target_offset += PAGE_SIZE_64;
                xfer_size -= PAGE_SIZE;
                entry++;
 
-               if (delayed_unlock++ > UPL_DELAYED_UNLOCK_LIMIT) {
-                       /*
-                        * pageout_scan takes the vm_page_lock_queues first
-                        * then tries for the object lock... to avoid what
-                        * is effectively a lock inversion, we'll go to the
-                        * trouble of taking them in that same order... otherwise
-                        * if this object contains the majority of the pages resident
-                        * in the UBC (or a small set of large objects actively being
-                        * worked on contain the majority of the pages), we could
-                        * cause the pageout_scan thread to 'starve' in its attempt
-                        * to find pages to move to the free queue, since it has to
-                        * successfully acquire the object lock of any candidate page
-                        * before it can steal/clean it.
-                        */
-                       vm_object_unlock(shadow_object);
-                       mutex_yield(&vm_page_queue_lock);
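+               /*
+                * If this page picked up deferred work, batch the operations
+                * that need the page-queue lock and flush the batch when it
+                * fills; the simple busy-clear / wakeup cases are handled
+                * inline without taking that lock.
+                */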
+               if (dwp->dw_mask) {
+                       if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+                               VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 
-                       for (j = 0; ; j++) {
-                               if (vm_object_lock_try(shadow_object))
-                                       break;
-                               vm_page_unlock_queues();
-                               mutex_pause(j);
-                               vm_page_lock_queues();
+                               if (dw_count >= dw_limit) {
+                                       vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
+
+                                       dwp = dwp_start;
+                                       dw_count = 0;
+                               }
+                       } else {
+                               if (dwp->dw_mask & DW_clear_busy) {
+                                       m->vmp_busy = FALSE;
+                               }
+
+                               if (dwp->dw_mask & DW_PAGE_WAKEUP) {
+                                       PAGE_WAKEUP(m);
+                               }
                        }
-                       delayed_unlock = 1;
                }
        }
-       if (delayed_unlock)
-               vm_page_unlock_queues();
+       if (dw_count) {
+               vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
+               dwp = dwp_start;
+               dw_count = 0;
+       }
+
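+       /*
+        * If the fast path was used, the committed pages were gathered on
+        * a private local queue and/or unwired in bulk; splice that local
+        * queue onto the chosen global page queue and adjust the global
+        * counts while holding the page-queue lock.
+        */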
+       if (fast_path_possible) {
+               assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
+               assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
+
+               if (local_queue_count || unwired_count) {
+                       if (local_queue_count) {
+                               vm_page_t       first_target;
+                               vm_page_queue_head_t    *target_queue;
+
+                               if (throttle_page) {
+                                       target_queue = &vm_page_queue_throttled;
+                               } else {
+                                       if (flags & UPL_COMMIT_INACTIVATE) {
+                                               if (shadow_object->internal) {
+                                                       target_queue = &vm_page_queue_anonymous;
+                                               } else {
+                                                       target_queue = &vm_page_queue_inactive;
+                                               }
+                                       } else {
+                                               target_queue = &vm_page_queue_active;
+                                       }
+                               }
+                               /*
+                                * Transfer the entire local queue to a regular LRU page queue.
+                                */
+                               vm_page_lockspin_queues();
+
+                               first_target = (vm_page_t) vm_page_queue_first(target_queue);
+
+                               if (vm_page_queue_empty(target_queue)) {
+                                       target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
+                               } else {
+                                       first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
+                               }
+
+                               target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
+                               first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
+                               last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
 
+                               /*
+                                * Adjust the global page counts.
+                                */
+                               if (throttle_page) {
+                                       vm_page_throttled_count += local_queue_count;
+                               } else {
+                                       if (flags & UPL_COMMIT_INACTIVATE) {
+                                               if (shadow_object->internal) {
+                                                       vm_page_anonymous_count += local_queue_count;
+                                               }
+                                               vm_page_inactive_count += local_queue_count;
+
+                                               token_new_pagecount += local_queue_count;
+                                       } else {
+                                               vm_page_active_count += local_queue_count;
+                                       }
+
+                                       if (shadow_object->internal) {
+                                               vm_page_pageable_internal_count += local_queue_count;
+                                       } else {
+                                               vm_page_pageable_external_count += local_queue_count;
+                                       }
+                               }
+                       } else {
+                               vm_page_lockspin_queues();
+                       }
+                       if (unwired_count) {
+                               vm_page_wire_count -= unwired_count;
+                               VM_CHECK_MEMORYSTATUS;
+                       }
+                       vm_page_unlock_queues();
+
+                       VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
+               }
+       }
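+       /*
+        * Determine whether any pages are still associated with this UPL:
+        * device-memory UPLs never hold pages, lite UPLs track them in the
+        * lite_list bitmap, and other UPLs via the map object's page list.
+        */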
        occupied = 1;
 
-       if (upl->flags & UPL_DEVICE_MEMORY)  {
+       if (upl->flags & UPL_DEVICE_MEMORY) {
                occupied = 0;
        } else if (upl->flags & UPL_LITE) {
-               int     pg_num;
-               int     i;
+               int     pg_num;
+               int     i;
 
-               pg_num = upl->size/PAGE_SIZE;
-               pg_num = (pg_num + 31) >> 5;
                occupied = 0;
 
-               for (i = 0; i < pg_num; i++) {
-                       if (lite_list[i] != 0) {
-                               occupied = 1;
-                               break;
+               if (!fast_path_full_commit) {
+                       pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
+                       pg_num = (pg_num + 31) >> 5;
+
+                       for (i = 0; i < pg_num; i++) {
+                               if (lite_list[i] != 0) {
+                                       occupied = 1;
+                                       break;
+                               }
                        }
                }
        } else {
-               if (queue_empty(&upl->map_object->memq))
+               if (vm_page_queue_empty(&upl->map_object->memq)) {
                        occupied = 0;
+               }
        }
        if (occupied == 0) {
-               if (upl->flags & UPL_COMMIT_NOTIFY_EMPTY)
+               /*
+                * If this UPL element belongs to a Vector UPL and is
+                * empty, then this is the right function to deallocate
+                * it. So go ahead and set the *empty variable. From the
+                * caller's point of view, the flag UPL_COMMIT_NOTIFY_EMPTY
+                * should be considered relevant for the Vector UPL and not
+                * the internal UPLs.
+                */
+               if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
                        *empty = TRUE;
+               }
 
-               if (object == shadow_object) {
-                       /*
+               if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+                       /*
                         * this is not a paging object
                         * so we need to drop the paging reference
                         * that was taken when we created the UPL
                         * against this object
                         */
-                       vm_object_paging_end(shadow_object);
+                       vm_object_activity_end(shadow_object);
+                       vm_object_collapse(shadow_object, 0, TRUE);
                } else {
-                        /*
-                         * we dontated the paging reference to
-                         * the map object... vm_pageout_object_terminate
-                         * will drop this reference
-                         */
+                       /*
+                        * we donated the paging reference to
+                        * the map object... vm_pageout_object_terminate
+                        * will drop this reference
+                        */
                }
        }
+       VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
        vm_object_unlock(shadow_object);
-       if (object != shadow_object)
-               vm_object_unlock(object);
-       upl_unlock(upl);
+       if (object != shadow_object) {
+               vm_object_unlock(object);
+       }
 
+       if (!isVectorUPL) {
+               upl_unlock(upl);
+       } else {
+               /*
+                * If we completed our operations on an UPL that is
+                * part of a Vectored UPL and if empty is TRUE, then
+                * we should go ahead and deallocate this UPL element.
+                * Then we check if this was the last of the UPL elements
+                * within that Vectored UPL. If so, set empty to TRUE
+                * so that in ubc_upl_commit_range or ubc_upl_commit, we
+                * can go ahead and deallocate the Vector UPL too.
+                */
+               if (*empty == TRUE) {
+                       *empty = vector_upl_set_subupl(vector_upl, upl, 0);
+                       upl_deallocate(upl);
+               }
+               goto process_upl_to_commit;
+       }
        if (pgpgout_count) {
                DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
        }
 
-       return KERN_SUCCESS;
+       kr = KERN_SUCCESS;
+done:
+       if (dwp_start && dwp_finish_ctx) {
+               vm_page_delayed_work_finish_ctx(dwp_start);
+               dwp_start = dwp = NULL;
+       }
+
+       return kr;
 }
 
 kern_return_t
 upl_abort_range(
-       upl_t                   upl, 
-       upl_offset_t            offset, 
-       upl_size_t              size,
-       int                     error,
-       boolean_t               *empty) 
+       upl_t                   upl,
+       upl_offset_t            offset,
+       upl_size_t              size,
+       int                     error,
+       boolean_t               *empty)
 {
-       upl_size_t              xfer_size;
-       vm_object_t             shadow_object;
-       vm_object_t             object;
-       vm_object_offset_t      target_offset;
-       int                     entry;
-       wpl_array_t             lite_list;
-       int                     occupied;
-       int                     delayed_unlock = 0;
-       int                     j;
-
+       upl_page_info_t         *user_page_list = NULL;
+       upl_size_t              xfer_size, subupl_size;
+       vm_object_t             shadow_object;
+       vm_object_t             object;
+       vm_object_offset_t      target_offset;
+       upl_offset_t            subupl_offset = offset;
+       int                     entry;
+       wpl_array_t             lite_list;
+       int                     occupied;
+       struct  vm_page_delayed_work    dw_array;
+       struct  vm_page_delayed_work    *dwp, *dwp_start;
+       bool                    dwp_finish_ctx = TRUE;
+       int                     dw_count;
+       int                     dw_limit;
+       int                     isVectorUPL = 0;
+       upl_t                   vector_upl = NULL;
+       vm_object_offset_t      obj_start, obj_end, obj_offset;
+       kern_return_t           kr = KERN_SUCCESS;
+
+//     DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);
+
+       dwp_start = dwp = NULL;
+
+       subupl_size = size;
        *empty = FALSE;
 
-       if (upl == UPL_NULL)
+       if (upl == UPL_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
-               return upl_commit_range(upl, offset, size, 0, NULL, 0, empty);
+       if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
+               return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
+       }
 
-       if (upl->flags & UPL_DEVICE_MEMORY)
-               xfer_size = 0;
-       else if ((offset + size) <= upl->size)
-               xfer_size = size;
-       else
-               return KERN_FAILURE;
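+       /*
+        * Set up a batch for delayed page operations; if no per-thread
+        * context is available, fall back to a single on-stack entry.
+        */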
+       dw_count = 0;
+       dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+       dwp_start = vm_page_delayed_work_get_ctx();
+       if (dwp_start == NULL) {
+               dwp_start = &dw_array;
+               dw_limit = 1;
+               dwp_finish_ctx = FALSE;
+       }
 
-       upl_lock(upl);
+       dwp = dwp_start;
+
+       if ((isVectorUPL = vector_upl_is_valid(upl))) {
+               vector_upl = upl;
+               upl_lock(vector_upl);
+       } else {
+               upl_lock(upl);
+       }
+
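+       /*
+        * For a vector UPL, each pass through this loop peels off the
+        * sub-UPL covering the next part of the requested range and aborts
+        * it; a non-vector UPL makes exactly one pass.
+        */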
+process_upl_to_abort:
+       if (isVectorUPL) {
+               size = subupl_size;
+               offset = subupl_offset;
+               if (size == 0) {
+                       upl_unlock(vector_upl);
+                       kr = KERN_SUCCESS;
+                       goto done;
+               }
+               upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+               if (upl == NULL) {
+                       upl_unlock(vector_upl);
+                       kr = KERN_FAILURE;
+                       goto done;
+               }
+               subupl_size -= size;
+               subupl_offset += size;
+       }
+
+       *empty = FALSE;
+
+#if UPL_DEBUG
+       if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+               (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+
+               upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+               upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
+               upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
 
+               upl->upl_commit_index++;
+       }
+#endif
+       if (upl->flags & UPL_DEVICE_MEMORY) {
+               xfer_size = 0;
+       } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) {
+               xfer_size = size;
+       } else {
+               if (!isVectorUPL) {
+                       upl_unlock(upl);
+               } else {
+                       upl_unlock(vector_upl);
+               }
+               DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size);
+               kr = KERN_FAILURE;
+               goto done;
+       }
        if (upl->flags & UPL_INTERNAL) {
-               lite_list = (wpl_array_t) 
-                       ((((uintptr_t)upl) + sizeof(struct upl))
-                       + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+               lite_list = (wpl_array_t)
+                   ((((uintptr_t)upl) + sizeof(struct upl))
+                   + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t)));
+
+               user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
        } else {
-               lite_list = (wpl_array_t) 
-                       (((uintptr_t)upl) + sizeof(struct upl));
+               lite_list = (wpl_array_t)
+                   (((uintptr_t)upl) + sizeof(struct upl));
        }
        object = upl->map_object;
 
        if (upl->flags & UPL_SHADOWED) {
-               vm_object_lock(object);
+               vm_object_lock(object);
                shadow_object = object->shadow;
-       } else
+       } else {
                shadow_object = object;
+       }
 
-       entry = offset/PAGE_SIZE;
+       entry = offset / PAGE_SIZE;
        target_offset = (vm_object_offset_t)offset;
 
-       /*
-        * pageout_scan takes the vm_page_lock_queues first
-        * then tries for the object lock... to avoid what
-        * is effectively a lock inversion, we'll go to the
-        * trouble of taking them in that same order... otherwise
-        * if this object contains the majority of the pages resident
-        * in the UBC (or a small set of large objects actively being
-        * worked on contain the majority of the pages), we could
-        * cause the pageout_scan thread to 'starve' in its attempt
-        * to find pages to move to the free queue, since it has to
-        * successfully acquire the object lock of any candidate page
-        * before it can steal/clean it.
-        */
-       for (j = 0; ; j++) {
-               vm_page_lock_queues();
+       if (upl->flags & UPL_KERNEL_OBJECT) {
+               vm_object_lock_shared(shadow_object);
+       } else {
+               vm_object_lock(shadow_object);
+       }
 
-               if (vm_object_lock_try(shadow_object))
-                       break;
-               vm_page_unlock_queues();
-               mutex_pause(j);
+       if (upl->flags & UPL_ACCESS_BLOCKED) {
+               assert(shadow_object->blocked_access);
+               shadow_object->blocked_access = FALSE;
+               vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
        }
-       delayed_unlock = 1;
 
-       while (xfer_size) {
-               vm_page_t       t, m;
+       if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
+               panic("upl_abort_range: kernel_object being DUMPED");
+       }
+
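+       /*
+        * Convert the UPL-relative range into offsets within the shadow
+        * object and walk it one page at a time.
+        */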
+       obj_start = target_offset + upl->u_offset - shadow_object->paging_offset;
+       obj_end = obj_start + xfer_size;
+       obj_start = vm_object_trunc_page(obj_start);
+       obj_end = vm_object_round_page(obj_end);
+       for (obj_offset = obj_start;
+           obj_offset < obj_end;
+           obj_offset += PAGE_SIZE) {
+               vm_page_t       t, m;
+               unsigned int    pg_num;
+               boolean_t       needed;
+
+               pg_num = (unsigned int) (target_offset / PAGE_SIZE);
+               assert(pg_num == target_offset / PAGE_SIZE);
+
+               needed = FALSE;
 
+               if (user_page_list) {
+                       needed = user_page_list[pg_num].needed;
+               }
+
+               dwp->dw_mask = 0;
                m = VM_PAGE_NULL;
 
                if (upl->flags & UPL_LITE) {
-                       int     pg_num;
-                       pg_num = target_offset/PAGE_SIZE;
-
-                       if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
-                               lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
+                       if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
+                               lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
 
-                               m = vm_page_lookup(shadow_object, target_offset +
-                                                  (upl->offset - shadow_object->paging_offset));
+                               if (!(upl->flags & UPL_KERNEL_OBJECT)) {
+                                       m = vm_page_lookup(shadow_object, obj_offset);
+                               }
                        }
                }
                if (upl->flags & UPL_SHADOWED) {
-                       if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
-                               t->pageout = FALSE;
+                       if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+                               t->vmp_free_when_done = FALSE;
 
-                               vm_page_free(t);
+                               VM_PAGE_FREE(t);
 
-                               if (m == VM_PAGE_NULL)
-                                       m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
+                               if (m == VM_PAGE_NULL) {
+                                       m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
+                               }
                        }
                }
+               if ((upl->flags & UPL_KERNEL_OBJECT)) {
+                       goto abort_next_page;
+               }
+
                if (m != VM_PAGE_NULL) {
+                       assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
 
-                       if (m->absent) {
-                               boolean_t must_free = TRUE;
+                       if (m->vmp_absent) {
+                               boolean_t must_free = TRUE;
 
-                               m->clustered = FALSE;
                                /*
                                 * COPYOUT = FALSE case
                                 * check for error conditions which must
                                 * be passed back to the pages customer
                                 */
                                if (error & UPL_ABORT_RESTART) {
-                                       m->restart = TRUE;
-                                       m->absent = FALSE;
-                                       m->error = TRUE;
-                                       m->unusual = TRUE;
+                                       m->vmp_restart = TRUE;
+                                       m->vmp_absent = FALSE;
+                                       m->vmp_unusual = TRUE;
                                        must_free = FALSE;
                                } else if (error & UPL_ABORT_UNAVAILABLE) {
-                                       m->restart = FALSE;
-                                       m->unusual = TRUE;
+                                       m->vmp_restart = FALSE;
+                                       m->vmp_unusual = TRUE;
                                        must_free = FALSE;
                                } else if (error & UPL_ABORT_ERROR) {
-                                       m->restart = FALSE;
-                                       m->absent = FALSE;
-                                       m->error = TRUE;
-                                       m->unusual = TRUE;
+                                       m->vmp_restart = FALSE;
+                                       m->vmp_absent = FALSE;
+                                       m->vmp_error = TRUE;
+                                       m->vmp_unusual = TRUE;
                                        must_free = FALSE;
                                }
+                               if (m->vmp_clustered && needed == FALSE) {
+                                       /*
+                                        * This page was a part of a speculative
+                                        * read-ahead initiated by the kernel
+                                        * itself.  No one is expecting this
+                                        * page and no one will clean up its
+                                        * error state if it ever becomes valid
+                                        * in the future.
+                                        * We have to free it here.
+                                        */
+                                       must_free = TRUE;
+                               }
+                               m->vmp_cleaning = FALSE;
 
-                               /*
-                                * ENCRYPTED SWAP:
-                                * If the page was already encrypted,
-                                * we don't really need to decrypt it
-                                * now.  It will get decrypted later,
-                                * on demand, as soon as someone needs
-                                * to access its contents.
-                                */
+                               if (m->vmp_overwriting && !m->vmp_busy) {
+                                       /*
+                                        * this shouldn't happen since
+                                        * this is an 'absent' page, but
+                                        * it doesn't hurt to check for
+                                        * the 'alternate' method of
+                                        * stabilizing the page...
+                                        * we will mark 'busy' to be cleared
+                                        * in the following code which will
+                                        * take care of the primary stabilization
+                                        * method (i.e. setting 'busy' to TRUE)
+                                        */
+                                       dwp->dw_mask |= DW_vm_page_unwire;
+                               }
+                               m->vmp_overwriting = FALSE;
 
-                               m->cleaning = FALSE;
-                               m->encrypted_cleaning = FALSE;
-                               m->overwriting = FALSE;
-                               PAGE_WAKEUP_DONE(m);
+                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
 
-                               if (must_free == TRUE)
-                                       vm_page_free(m);
-                               else
-                                       vm_page_activate(m);
+                               if (must_free == TRUE) {
+                                       dwp->dw_mask |= DW_vm_page_free;
+                               } else {
+                                       dwp->dw_mask |= DW_vm_page_activate;
+                               }
                        } else {
-                               /*                          
+                               /*
                                 * Handle the trusted pager throttle.
-                                */                     
-                               if (m->laundry)
-                                       vm_pageout_throttle_up(m);
-
-                               if (m->pageout) {
-                                       assert(m->busy);
-                                       assert(m->wire_count == 1);
-                                       m->pageout = FALSE;
-                                       vm_page_unwire(m);
+                                */
+                               if (m->vmp_laundry) {
+                                       dwp->dw_mask |= DW_vm_pageout_throttle_up;
                                }
-                               m->dump_cleaning = FALSE;
-                               m->cleaning = FALSE;
-                               m->encrypted_cleaning = FALSE;
-                               m->overwriting = FALSE;
-#if    MACH_PAGEMAP
-                               vm_external_state_clr(m->object->existence_map, m->offset);
-#endif /* MACH_PAGEMAP */
-                               if (error & UPL_ABORT_DUMP_PAGES) {
-                                       pmap_disconnect(m->phys_page);
-                                       vm_page_free(m);
-                               } else {
-                                       if (error & UPL_ABORT_REFERENCE) {
+
+                               if (upl->flags & UPL_ACCESS_BLOCKED) {
+                                       /*
+                                        * We blocked access to the pages in this UPL.
+                                        * Clear the "busy" bit and wake up any waiter
+                                        * for this page.
+                                        */
+                                       dwp->dw_mask |= DW_clear_busy;
+                               }
+                               if (m->vmp_overwriting) {
+                                       if (m->vmp_busy) {
+                                               dwp->dw_mask |= DW_clear_busy;
+                                       } else {
                                                /*
-                                                * we've been told to explictly
-                                                * reference this page... for 
-                                                * file I/O, this is done by
-                                                * implementing an LRU on the inactive q
+                                                * deal with the 'alternate' method
+                                                * of stabilizing the page...
+                                                * we will either free the page
+                                                * or mark 'busy' to be cleared
+                                                * in the following code which will
+                                                * take care of the primary stabilization
+                                                * method (i.e. setting 'busy' to TRUE)
                                                 */
-                                               vm_page_lru(m);
+                                               dwp->dw_mask |= DW_vm_page_unwire;
                                        }
-                                       PAGE_WAKEUP_DONE(m);
+                                       m->vmp_overwriting = FALSE;
                                }
-                       }
-               }
-               if (delayed_unlock++ > UPL_DELAYED_UNLOCK_LIMIT) {
-                       /*
-                        * pageout_scan takes the vm_page_lock_queues first
-                        * then tries for the object lock... to avoid what
-                        * is effectively a lock inversion, we'll go to the
-                        * trouble of taking them in that same order... otherwise
-                        * if this object contains the majority of the pages resident
-                        * in the UBC (or a small set of large objects actively being
-                        * worked on contain the majority of the pages), we could
-                        * cause the pageout_scan thread to 'starve' in its attempt
-                        * to find pages to move to the free queue, since it has to
-                        * successfully acquire the object lock of any candidate page
-                        * before it can steal/clean it.
-                        */
-                       vm_object_unlock(shadow_object);
-                       mutex_yield(&vm_page_queue_lock);
+                               m->vmp_free_when_done = FALSE;
+                               m->vmp_cleaning = FALSE;
+
+                               if (error & UPL_ABORT_DUMP_PAGES) {
+                                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 
-                       for (j = 0; ; j++) {
-                               if (vm_object_lock_try(shadow_object))
-                                       break;
-                               vm_page_unlock_queues();
-                               mutex_pause(j);
-                               vm_page_lock_queues();
+                                       dwp->dw_mask |= DW_vm_page_free;
+                               } else {
+                                       if (!(dwp->dw_mask & DW_vm_page_unwire)) {
+                                               if (error & UPL_ABORT_REFERENCE) {
+                                                       /*
+                                                        * we've been told to explicitly
+                                                        * reference this page... for
+                                                        * file I/O, this is done by
+                                                        * implementing an LRU on the inactive q
+                                                        */
+                                                       dwp->dw_mask |= DW_vm_page_lru;
+                                               } else if (!VM_PAGE_PAGEABLE(m)) {
+                                                       dwp->dw_mask |= DW_vm_page_deactivate_internal;
+                                               }
+                                       }
+                                       dwp->dw_mask |= DW_PAGE_WAKEUP;
+                               }
                        }
-                       delayed_unlock = 1;
                }
+abort_next_page:
                target_offset += PAGE_SIZE_64;
                xfer_size -= PAGE_SIZE;
                entry++;
+
+               if (dwp->dw_mask) {
+                       if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+                               VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
+
+                               if (dw_count >= dw_limit) {
+                                       vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
+
+                                       dwp = dwp_start;
+                                       dw_count = 0;
+                               }
+                       } else {
+                               if (dwp->dw_mask & DW_clear_busy) {
+                                       m->vmp_busy = FALSE;
+                               }
+
+                               if (dwp->dw_mask & DW_PAGE_WAKEUP) {
+                                       PAGE_WAKEUP(m);
+                               }
+                       }
+               }
+       }
+       if (dw_count) {
+               vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
+               dwp = dwp_start;
+               dw_count = 0;
        }
-       if (delayed_unlock)
-               vm_page_unlock_queues();
 
        occupied = 1;
 
-       if (upl->flags & UPL_DEVICE_MEMORY)  {
+       if (upl->flags & UPL_DEVICE_MEMORY) {
                occupied = 0;
        } else if (upl->flags & UPL_LITE) {
-               int     pg_num;
-               int     i;
+               int     pg_num;
+               int     i;
 
-               pg_num = upl->size/PAGE_SIZE;
+               pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
                pg_num = (pg_num + 31) >> 5;
                occupied = 0;
 
@@ -4653,85 +8389,537 @@ upl_abort_range(
                        }
                }
        } else {
-               if (queue_empty(&upl->map_object->memq))
+               if (vm_page_queue_empty(&upl->map_object->memq)) {
                        occupied = 0;
+               }
        }
        if (occupied == 0) {
-               if (upl->flags & UPL_COMMIT_NOTIFY_EMPTY)
+               /*
+                * If this UPL element belongs to a Vector UPL and is
+                * empty, then this is the right function to deallocate
+                * it. So go ahead and set the *empty variable. From the
+                * caller's point of view, the flag UPL_COMMIT_NOTIFY_EMPTY
+                * should be considered relevant for the Vector UPL and
+                * not the internal UPLs.
+                */
+               if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
                        *empty = TRUE;
+               }
 
-               if (object == shadow_object) {
-                       /*
+               if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+                       /*
                         * this is not a paging object
                         * so we need to drop the paging reference
                         * that was taken when we created the UPL
                         * against this object
                         */
-                       vm_object_paging_end(shadow_object);
+                       vm_object_activity_end(shadow_object);
+                       vm_object_collapse(shadow_object, 0, TRUE);
                } else {
-                        /*
-                         * we dontated the paging reference to
-                         * the map object... vm_pageout_object_terminate
-                         * will drop this reference
-                         */
+                       /*
+                        * we donated the paging reference to
+                        * the map object... vm_pageout_object_terminate
+                        * will drop this reference
+                        */
                }
        }
        vm_object_unlock(shadow_object);
-       if (object != shadow_object)
-               vm_object_unlock(object);
-       upl_unlock(upl);
+       if (object != shadow_object) {
+               vm_object_unlock(object);
+       }
 
-       return KERN_SUCCESS;
+       if (!isVectorUPL) {
+               upl_unlock(upl);
+       } else {
+               /*
+                * If we completed our operations on an UPL that is
+                * part of a Vectored UPL and if empty is TRUE, then
+                * we should go ahead and deallocate this UPL element.
+                * Then we check if this was the last of the UPL elements
+                * within that Vectored UPL. If so, set empty to TRUE
+                * so that in ubc_upl_abort_range or ubc_upl_abort, we
+                * can go ahead and deallocate the Vector UPL too.
+                */
+               if (*empty == TRUE) {
+                       *empty = vector_upl_set_subupl(vector_upl, upl, 0);
+                       upl_deallocate(upl);
+               }
+               goto process_upl_to_abort;
+       }
+
+       kr = KERN_SUCCESS;
+
+done:
+       if (dwp_start && dwp_finish_ctx) {
+               vm_page_delayed_work_finish_ctx(dwp_start);
+               dwp_start = dwp = NULL;
+       }
+
+       return kr;
 }
 
 
 kern_return_t
 upl_abort(
-       upl_t   upl,
-       int     error)
+       upl_t   upl,
+       int     error)
 {
-       boolean_t       empty;
+       boolean_t       empty;
+
+       if (upl == UPL_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       return upl_abort_range(upl, 0, upl->size, error, &empty);
+       return upl_abort_range(upl, 0, upl->u_size, error, &empty);
 }
 
 
 /* an option on commit should be wire */
 kern_return_t
 upl_commit(
-       upl_t                   upl,
-       upl_page_info_t         *page_list,
-       mach_msg_type_number_t  count)
+       upl_t                   upl,
+       upl_page_info_t         *page_list,
+       mach_msg_type_number_t  count)
+{
+       boolean_t       empty;
+
+       if (upl == UPL_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       return upl_commit_range(upl, 0, upl->u_size, 0,
+                  page_list, count, &empty);
+}
+
+
+void
+iopl_valid_data(
+       upl_t    upl,
+       vm_tag_t tag)
+{
+       vm_object_t     object;
+       vm_offset_t     offset;
+       vm_page_t       m, nxt_page = VM_PAGE_NULL;
+       upl_size_t      size;
+       int             wired_count = 0;
+
+       if (upl == NULL) {
+               panic("iopl_valid_data: NULL upl");
+       }
+       if (vector_upl_is_valid(upl)) {
+               panic("iopl_valid_data: vector upl");
+       }
+       if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) {
+               panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
+       }
+
+       object = upl->map_object;
+
+       if (object == kernel_object || object == compressor_object) {
+               panic("iopl_valid_data: object == kernel or compressor");
+       }
+
+       if (object->purgable == VM_PURGABLE_VOLATILE ||
+           object->purgable == VM_PURGABLE_EMPTY) {
+               panic("iopl_valid_data: object %p purgable %d",
+                   object, object->purgable);
+       }
+
+       size = upl_adjusted_size(upl, PAGE_MASK);
+
+       vm_object_lock(object);
+       VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
+
+       if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) {
+               nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
+       } else {
+               offset = (vm_offset_t)(upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset);
+       }
+
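+       /*
+        * Walk every page covered by the UPL; pages still marked busy and
+        * absent were set up without contents, so now that their data is
+        * valid, mark them present, dirty, and wired.
+        */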
+       while (size) {
+               if (nxt_page != VM_PAGE_NULL) {
+                       m = nxt_page;
+                       nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
+               } else {
+                       m = vm_page_lookup(object, offset);
+                       offset += PAGE_SIZE;
+
+                       if (m == VM_PAGE_NULL) {
+                               panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
+                       }
+               }
+               if (m->vmp_busy) {
+                       if (!m->vmp_absent) {
+                               panic("iopl_valid_data: busy page w/o absent");
+                       }
+
+                       if (m->vmp_pageq.next || m->vmp_pageq.prev) {
+                               panic("iopl_valid_data: busy+absent page on page queue");
+                       }
+                       if (m->vmp_reusable) {
+                               panic("iopl_valid_data: %p is reusable", m);
+                       }
+
+                       m->vmp_absent = FALSE;
+                       m->vmp_dirty = TRUE;
+                       assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
+                       assert(m->vmp_wire_count == 0);
+                       m->vmp_wire_count++;
+                       assert(m->vmp_wire_count);
+                       if (m->vmp_wire_count == 1) {
+                               m->vmp_q_state = VM_PAGE_IS_WIRED;
+                               wired_count++;
+                       } else {
+                               panic("iopl_valid_data: %p already wired\n", m);
+                       }
+
+                       PAGE_WAKEUP_DONE(m);
+               }
+               size -= PAGE_SIZE;
+       }
+       if (wired_count) {
+               VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
+               assert(object->resident_page_count >= object->wired_page_count);
+
+               /* no need to adjust purgeable accounting for this object: */
+               assert(object->purgable != VM_PURGABLE_VOLATILE);
+               assert(object->purgable != VM_PURGABLE_EMPTY);
+
+               vm_page_lockspin_queues();
+               vm_page_wire_count += wired_count;
+               vm_page_unlock_queues();
+       }
+       VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
+       vm_object_unlock(object);
+}
+
+
+void
+vm_object_set_pmap_cache_attr(
+       vm_object_t             object,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            num_pages,
+       boolean_t               batch_pmap_op)
+{
+       unsigned int    cache_attr = 0;
+
+       cache_attr = object->wimg_bits & VM_WIMG_MASK;
+       assert(user_page_list);
+       if (cache_attr != VM_WIMG_USE_DEFAULT) {
+               PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
+       }
+}
+
+
+boolean_t       vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
+kern_return_t   vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*);
+
+
+
+boolean_t
+vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
+    wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag)
+{
+       vm_page_t       dst_page;
+       unsigned int    entry;
+       int             page_count;
+       int             delayed_unlock = 0;
+       boolean_t       retval = TRUE;
+       ppnum_t         phys_page;
+
+       vm_object_lock_assert_exclusive(object);
+       assert(object->purgable != VM_PURGABLE_VOLATILE);
+       assert(object->purgable != VM_PURGABLE_EMPTY);
+       assert(object->pager == NULL);
+       assert(object->copy == NULL);
+       assert(object->shadow == NULL);
+
+       page_count = object->resident_page_count;
+       dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
+
+       vm_page_lock_queues();
+
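+       /*
+        * This fast path requires that every resident page be immediately
+        * wireable; return FALSE so the caller can fall back to the general
+        * path if any page is busy, absent, in error, being cleaned or
+        * laundered, or marked for restart.
+        */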
+       while (page_count--) {
+               if (dst_page->vmp_busy ||
+                   dst_page->vmp_fictitious ||
+                   dst_page->vmp_absent ||
+                   dst_page->vmp_error ||
+                   dst_page->vmp_cleaning ||
+                   dst_page->vmp_restart ||
+                   dst_page->vmp_laundry) {
+                       retval = FALSE;
+                       goto done;
+               }
+               if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
+                       retval = FALSE;
+                       goto done;
+               }
+               dst_page->vmp_reference = TRUE;
+
+               vm_page_wire(dst_page, tag, FALSE);
+
+               if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
+                       SET_PAGE_DIRTY(dst_page, FALSE);
+               }
+               entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE);
+               assert(entry >= 0 && entry < object->resident_page_count);
+               lite_list[entry >> 5] |= 1U << (entry & 31);
+
+               phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
+               if (phys_page > upl->highest_page) {
+                       upl->highest_page = phys_page;
+               }
+
+               if (user_page_list) {
+                       user_page_list[entry].phys_addr = phys_page;
+                       user_page_list[entry].absent    = dst_page->vmp_absent;
+                       user_page_list[entry].dirty     = dst_page->vmp_dirty;
+                       user_page_list[entry].free_when_done   = dst_page->vmp_free_when_done;
+                       user_page_list[entry].precious  = dst_page->vmp_precious;
+                       user_page_list[entry].device    = FALSE;
+                       user_page_list[entry].speculative = FALSE;
+                       user_page_list[entry].cs_validated = FALSE;
+                       user_page_list[entry].cs_tainted = FALSE;
+                       user_page_list[entry].cs_nx     = FALSE;
+                       user_page_list[entry].needed    = FALSE;
+                       user_page_list[entry].mark      = FALSE;
+               }
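+               /*
+                * Periodically yield the page-queue lock so a long wire
+                * operation does not starve other threads.
+                */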
+               if (delayed_unlock++ > 256) {
+                       delayed_unlock = 0;
+                       lck_mtx_yield(&vm_page_queue_lock);
+
+                       VM_CHECK_MEMORYSTATUS;
+               }
+               dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq);
+       }
+done:
+       vm_page_unlock_queues();
+
+       VM_CHECK_MEMORYSTATUS;
+
+       return retval;
+}
+
+
+kern_return_t
+vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
+    wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset,
+    int page_count, int* page_grab_count)
 {
-       boolean_t       empty;
+       vm_page_t       dst_page;
+       boolean_t       no_zero_fill = FALSE;
+       int             interruptible;
+       int             pages_wired = 0;
+       int             pages_inserted = 0;
+       int             entry = 0;
+       uint64_t        delayed_ledger_update = 0;
+       kern_return_t   ret = KERN_SUCCESS;
+       int             grab_options;
+       ppnum_t         phys_page;
+
+       vm_object_lock_assert_exclusive(object);
+       assert(object->purgable != VM_PURGABLE_VOLATILE);
+       assert(object->purgable != VM_PURGABLE_EMPTY);
+       assert(object->pager == NULL);
+       assert(object->copy == NULL);
+       assert(object->shadow == NULL);
+
+       if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
+               interruptible = THREAD_ABORTSAFE;
+       } else {
+               interruptible = THREAD_UNINT;
+       }
+
+       if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
+               no_zero_fill = TRUE;
+       }
+
+       grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+       if (object->can_grab_secluded) {
+               grab_options |= VM_PAGE_GRAB_SECLUDED;
+       }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
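+       /*
+        * Populate the empty object: grab a fresh page for each offset,
+        * either zero-fill it or leave it marked absent, wire the filled
+        * pages, and insert them into the object at consecutive offsets.
+        */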
+       while (page_count--) {
+               while ((dst_page = vm_page_grab_options(grab_options))
+                   == VM_PAGE_NULL) {
+                       OSAddAtomic(page_count, &vm_upl_wait_for_pages);
+
+                       VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
+
+                       if (vm_page_wait(interruptible) == FALSE) {
+                               /*
+                                * interrupted case
+                                */
+                               OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
+
+                               VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
+
+                               ret = MACH_SEND_INTERRUPTED;
+                               goto done;
+                       }
+                       OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
+
+                       VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
+               }
+               if (no_zero_fill == FALSE) {
+                       vm_page_zero_fill(dst_page);
+               } else {
+                       dst_page->vmp_absent = TRUE;
+               }
+
+               dst_page->vmp_reference = TRUE;
+
+               if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
+                       SET_PAGE_DIRTY(dst_page, FALSE);
+               }
+               if (dst_page->vmp_absent == FALSE) {
+                       assert(dst_page->vmp_q_state == VM_PAGE_NOT_ON_Q);
+                       assert(dst_page->vmp_wire_count == 0);
+                       dst_page->vmp_wire_count++;
+                       dst_page->vmp_q_state = VM_PAGE_IS_WIRED;
+                       assert(dst_page->vmp_wire_count);
+                       pages_wired++;
+                       PAGE_WAKEUP_DONE(dst_page);
+               }
+               pages_inserted++;
+
+               vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
+
+               lite_list[entry >> 5] |= 1U << (entry & 31);
+
+               phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
+               if (phys_page > upl->highest_page) {
+                       upl->highest_page = phys_page;
+               }
+
+               if (user_page_list) {
+                       user_page_list[entry].phys_addr = phys_page;
+                       user_page_list[entry].absent    = dst_page->vmp_absent;
+                       user_page_list[entry].dirty     = dst_page->vmp_dirty;
+                       user_page_list[entry].free_when_done    = FALSE;
+                       user_page_list[entry].precious  = FALSE;
+                       user_page_list[entry].device    = FALSE;
+                       user_page_list[entry].speculative = FALSE;
+                       user_page_list[entry].cs_validated = FALSE;
+                       user_page_list[entry].cs_tainted = FALSE;
+                       user_page_list[entry].cs_nx     = FALSE;
+                       user_page_list[entry].needed    = FALSE;
+                       user_page_list[entry].mark      = FALSE;
+               }
+               entry++;
+               *dst_offset += PAGE_SIZE_64;
+       }
+done:
+       if (pages_wired) {
+               vm_page_lockspin_queues();
+               vm_page_wire_count += pages_wired;
+               vm_page_unlock_queues();
+       }
+       if (pages_inserted) {
+               if (object->internal) {
+                       OSAddAtomic(pages_inserted, &vm_page_internal_count);
+               } else {
+                       OSAddAtomic(pages_inserted, &vm_page_external_count);
+               }
+       }
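+       /*
+        * Credit the owning task's ledgers once for all of the bytes
+        * inserted above rather than page by page.
+        */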
+       if (delayed_ledger_update) {
+               task_t          owner;
+               int             ledger_idx_volatile;
+               int             ledger_idx_nonvolatile;
+               int             ledger_idx_volatile_compressed;
+               int             ledger_idx_nonvolatile_compressed;
+               boolean_t       do_footprint;
+
+               owner = VM_OBJECT_OWNER(object);
+               assert(owner);
+
+               vm_object_ledger_tag_ledgers(object,
+                   &ledger_idx_volatile,
+                   &ledger_idx_nonvolatile,
+                   &ledger_idx_volatile_compressed,
+                   &ledger_idx_nonvolatile_compressed,
+                   &do_footprint);
+
+               /* more non-volatile bytes */
+               ledger_credit(owner->ledger,
+                   ledger_idx_nonvolatile,
+                   delayed_ledger_update);
+               if (do_footprint) {
+                       /* more footprint */
+                       ledger_credit(owner->ledger,
+                           task_ledgers.phys_footprint,
+                           delayed_ledger_update);
+               }
+       }
+
+       assert(page_grab_count);
+       *page_grab_count = pages_inserted;
 
-       return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
+       return ret;
 }
 
 
+
 kern_return_t
 vm_object_iopl_request(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       upl_size_t              size,
-       upl_t                   *upl_ptr,
-       upl_page_info_array_t   user_page_list,
-       unsigned int            *page_list_count,
-       int                     cntrl_flags)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       upl_size_t              size,
+       upl_t                   *upl_ptr,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            *page_list_count,
+       upl_control_flags_t     cntrl_flags,
+       vm_tag_t                tag)
 {
-       vm_page_t               dst_page;
-       vm_object_offset_t      dst_offset;
-       upl_size_t              xfer_size;
-       upl_t                   upl = NULL;
-       unsigned int            entry;
-       wpl_array_t             lite_list = NULL;
-       int                     delayed_unlock = 0;
-       int                     no_zero_fill = FALSE;
-       u_int32_t               psize;
-       kern_return_t           ret;
-       vm_prot_t               prot;
-       struct vm_object_fault_info fault_info;
+       vm_page_t               dst_page;
+       vm_object_offset_t      dst_offset;
+       upl_size_t              xfer_size;
+       upl_t                   upl = NULL;
+       unsigned int            entry;
+       wpl_array_t             lite_list = NULL;
+       int                     no_zero_fill = FALSE;
+       unsigned int            size_in_pages;
+       int                     page_grab_count = 0;
+       u_int32_t               psize;
+       kern_return_t           ret;
+       vm_prot_t               prot;
+       struct vm_object_fault_info fault_info = {};
+       struct  vm_page_delayed_work    dw_array;
+       struct  vm_page_delayed_work    *dwp, *dwp_start;
+       bool                    dwp_finish_ctx = TRUE;
+       int                     dw_count;
+       int                     dw_limit;
+       int                     dw_index;
+       boolean_t               caller_lookup;
+       int                     io_tracking_flag = 0;
+       int                     interruptible;
+       ppnum_t                 phys_page;
+
+       boolean_t               set_cache_attr_needed = FALSE;
+       boolean_t               free_wired_pages = FALSE;
+       boolean_t               fast_path_empty_req = FALSE;
+       boolean_t               fast_path_full_req = FALSE;
+
+#if DEVELOPMENT || DEBUG
+       task_t                  task = current_task();
+#endif /* DEVELOPMENT || DEBUG */
+
+       dwp_start = dwp = NULL;
+
+       vm_object_offset_t original_offset = offset;
+       upl_size_t original_size = size;
 
+//     DEBUG4K_UPL("object %p offset 0x%llx size 0x%llx cntrl_flags 0x%llx\n", object, (uint64_t)offset, (uint64_t)size, cntrl_flags);
+
+       size = (upl_size_t)(vm_object_round_page(offset + size) - vm_object_trunc_page(offset));
+       offset = vm_object_trunc_page(offset);
+       if (size != original_size || offset != original_offset) {
+               DEBUG4K_IOKIT("flags 0x%llx object %p offset 0x%llx size 0x%x -> offset 0x%llx size 0x%x\n", cntrl_flags, object, original_offset, original_size, offset, size);
+       }
 
        if (cntrl_flags & ~UPL_VALID_FLAGS) {
                /*
@@ -4740,88 +8928,148 @@ vm_object_iopl_request(
                 */
                return KERN_INVALID_VALUE;
        }
-       if (vm_lopage_poolsize == 0)
-               cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
+       if (vm_lopage_needed == FALSE) {
+               cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
+       }
 
        if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
-               if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
-                       return KERN_INVALID_VALUE;
+               if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) {
+                       return KERN_INVALID_VALUE;
+               }
 
                if (object->phys_contiguous) {
-                       if ((offset + object->shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
-                               return KERN_INVALID_ADDRESS;
-             
-                       if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
-                               return KERN_INVALID_ADDRESS;
+                       if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) {
+                               return KERN_INVALID_ADDRESS;
+                       }
+
+                       if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) {
+                               return KERN_INVALID_ADDRESS;
+                       }
                }
        }
-
-       if (cntrl_flags & UPL_ENCRYPT) {
-               /*
-                * ENCRYPTED SWAP:
-                * The paging path doesn't use this interface,
-                * so we don't support the UPL_ENCRYPT flag
-                * here.  We won't encrypt the pages.
-                */
-               assert(! (cntrl_flags & UPL_ENCRYPT));
+       if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
+               no_zero_fill = TRUE;
        }
-       if (cntrl_flags & UPL_NOZEROFILL)
-               no_zero_fill = TRUE;
 
-       if (cntrl_flags & UPL_COPYOUT_FROM)
+       if (cntrl_flags & UPL_COPYOUT_FROM) {
                prot = VM_PROT_READ;
-       else
+       } else {
                prot = VM_PROT_READ | VM_PROT_WRITE;
+       }
 
-       if (((size/page_size) > MAX_UPL_SIZE) && !object->phys_contiguous)
-               size = MAX_UPL_SIZE * page_size;
+       if ((!object->internal) && (object->paging_offset != 0)) {
+               panic("vm_object_iopl_request: external object with non-zero paging offset\n");
+       }
 
-       if (cntrl_flags & UPL_SET_INTERNAL) {
-               if (page_list_count != NULL)
-                       *page_list_count = MAX_UPL_SIZE;
+
+       VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0);
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+       if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) {
+               io_tracking_flag |= UPL_CREATE_IO_TRACKING;
        }
-       if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
-           ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
-               return KERN_INVALID_ARGUMENT;
+#endif
 
-       if ((!object->internal) && (object->paging_offset != 0))
-               panic("vm_object_iopl_request: external object with non-zero paging offset\n");
+#if CONFIG_IOSCHED
+       if (object->io_tracking) {
+               /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
+               if (object != kernel_object) {
+                       io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
+               }
+       }
+#endif
 
+       if (object->phys_contiguous) {
+               psize = PAGE_SIZE;
+       } else {
+               psize = size;
+
+               dw_count = 0;
+               dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+               dwp_start = vm_page_delayed_work_get_ctx();
+               if (dwp_start == NULL) {
+                       dwp_start = &dw_array;
+                       dw_limit = 1;
+                       dwp_finish_ctx = FALSE;
+               }
 
-       if (object->phys_contiguous)
-               psize = PAGE_SIZE;
-       else
-               psize = size;
+               dwp = dwp_start;
+       }
 
        if (cntrl_flags & UPL_SET_INTERNAL) {
-               upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+               upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
 
                user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
                lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
-                                          ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
+                   ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
+               if (size == 0) {
+                       user_page_list = NULL;
+                       lite_list = NULL;
+               }
        } else {
-               upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+               upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
 
                lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+               if (size == 0) {
+                       lite_list = NULL;
+               }
+       }
+       if (user_page_list) {
+               user_page_list[0].device = FALSE;
        }
-       if (user_page_list)
-               user_page_list[0].device = FALSE;
        *upl_ptr = upl;
 
+       if (cntrl_flags & UPL_NOZEROFILLIO) {
+               DTRACE_VM4(upl_nozerofillio,
+                   vm_object_t, object,
+                   vm_object_offset_t, offset,
+                   upl_size_t, size,
+                   upl_t, upl);
+       }
+
        upl->map_object = object;
-       upl->size = size;
+       upl->u_offset = original_offset;
+       upl->u_size = original_size;
 
-       vm_object_lock(object);
-       vm_object_paging_begin(object);
+       size_in_pages = size / PAGE_SIZE;
+
+       if (object == kernel_object &&
+           !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
+               upl->flags |= UPL_KERNEL_OBJECT;
+#if UPL_DEBUG
+               vm_object_lock(object);
+#else
+               vm_object_lock_shared(object);
+#endif
+       } else {
+               vm_object_lock(object);
+               vm_object_activity_begin(object);
+       }
        /*
         * paging in progress also protects the paging_offset
         */
-       upl->offset = offset + object->paging_offset;
+       upl->u_offset = original_offset + object->paging_offset;
 
-       if (object->phys_contiguous) {
-#ifdef UPL_DEBUG
+       if (cntrl_flags & UPL_BLOCK_ACCESS) {
+               /*
+                * The user requested that access to the pages in this UPL
+                * be blocked until the UPL is committed or aborted.
+                */
+               upl->flags |= UPL_ACCESS_BLOCKED;
+       }
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+       if ((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
+               vm_object_activity_begin(object);
                queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UPL_DEBUG */
+       }
+#endif
+
+       if (object->phys_contiguous) {
+               if (upl->flags & UPL_ACCESS_BLOCKED) {
+                       assert(!object->blocked_access);
+                       object->blocked_access = TRUE;
+               }
 
                vm_object_unlock(object);
 
@@ -4831,154 +9079,336 @@ vm_object_iopl_request(
                 */
                upl->flags |= UPL_DEVICE_MEMORY;
 
-               upl->highest_page = (offset + object->shadow_offset + size - 1)>>PAGE_SHIFT;
+               upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT);
 
                if (user_page_list) {
-                       user_page_list[0].phys_addr = (offset + object->shadow_offset)>>PAGE_SHIFT;
+                       user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT);
                        user_page_list[0].device = TRUE;
                }
                if (page_list_count != NULL) {
-                       if (upl->flags & UPL_INTERNAL)
-                               *page_list_count = 0;
-                       else
-                               *page_list_count = 1;
+                       if (upl->flags & UPL_INTERNAL) {
+                               *page_list_count = 0;
+                       } else {
+                               *page_list_count = 1;
+                       }
                }
+
+               VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
+#if DEVELOPMENT || DEBUG
+               if (task != NULL) {
+                       ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
+               }
+#endif /* DEVELOPMENT || DEBUG */
                return KERN_SUCCESS;
        }
-       /*
-        * Protect user space from future COW operations
-        */
-       object->true_share = TRUE;
+       if (object != kernel_object && object != compressor_object) {
+               /*
+                * Protect user space from future COW operations
+                */
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+               if (!object->true_share &&
+                   vm_object_tracking_inited) {
+                       void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+                       int num = 0;
+
+                       num = OSBacktrace(bt,
+                           VM_OBJECT_TRACKING_BTDEPTH);
+                       btlog_add_entry(vm_object_tracking_btlog,
+                           object,
+                           VM_OBJECT_TRACKING_OP_TRUESHARE,
+                           bt,
+                           num);
+               }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
 
-       if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
-               object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+               vm_object_lock_assert_exclusive(object);
+               object->true_share = TRUE;
 
-#ifdef UPL_DEBUG
-       queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UPL_DEBUG */
+               if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
+                       object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+               }
+       }
 
-       if (cntrl_flags & UPL_BLOCK_ACCESS) {
+       if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
+           object->copy != VM_OBJECT_NULL) {
                /*
-                * The user requested that access to the pages in this URL
-                * be blocked until the UPL is commited or aborted.
+                * Honor copy-on-write obligations
+                *
+                * The caller is gathering these pages and
+                * might modify their contents.  We need to
+                * make sure that the copy object has its own
+                * private copies of these pages before we let
+                * the caller modify them.
+                *
+                * NOTE: someone else could map the original object
+                * after we've done this copy-on-write here, and they
+                * could then see an inconsistent picture of the memory
+                * while it's being modified via the UPL.  To prevent this,
+                * we would have to block access to these pages until the
+                * UPL is released.  We could use the UPL_BLOCK_ACCESS
+                * code path for that...
                 */
-               upl->flags |= UPL_ACCESS_BLOCKED;
+               vm_object_update(object,
+                   offset,
+                   size,
+                   NULL,
+                   NULL,
+                   FALSE,              /* should_return */
+                   MEMORY_OBJECT_COPY_SYNC,
+                   VM_PROT_NO_CHANGE);
+               VM_PAGEOUT_DEBUG(iopl_cow, 1);
+               VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT));
+       }
+       if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
+           object->purgable != VM_PURGABLE_VOLATILE &&
+           object->purgable != VM_PURGABLE_EMPTY &&
+           object->copy == NULL &&
+           size == object->vo_size &&
+           offset == 0 &&
+           object->shadow == NULL &&
+           object->pager == NULL) {
+               if (object->resident_page_count == size_in_pages) {
+                       assert(object != compressor_object);
+                       assert(object != kernel_object);
+                       fast_path_full_req = TRUE;
+               } else if (object->resident_page_count == 0) {
+                       assert(object != compressor_object);
+                       assert(object != kernel_object);
+                       fast_path_empty_req = TRUE;
+                       set_cache_attr_needed = TRUE;
+               }
+       }
+
+       if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
+               interruptible = THREAD_ABORTSAFE;
+       } else {
+               interruptible = THREAD_UNINT;
        }
+
        entry = 0;
 
        xfer_size = size;
        dst_offset = offset;
 
+       if (fast_path_full_req) {
+               if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) {
+                       goto finish;
+               }
+               /*
+                * we couldn't complete the processing of this request on the fast path
+                * so fall through to the slow path and finish up
+                */
+       } else if (fast_path_empty_req) {
+               if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
+                       ret = KERN_MEMORY_ERROR;
+                       goto return_err;
+               }
+               ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, tag, &dst_offset, size_in_pages, &page_grab_count);
+
+               if (ret) {
+                       free_wired_pages = TRUE;
+                       goto return_err;
+               }
+               goto finish;
+       }
+
        fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
-       fault_info.user_tag  = 0;
        fault_info.lo_offset = offset;
        fault_info.hi_offset = offset + xfer_size;
-       fault_info.no_cache  = FALSE;
+       fault_info.mark_zf_absent = TRUE;
+       fault_info.interruptible = interruptible;
+       fault_info.batch_pmap_op = TRUE;
 
        while (xfer_size) {
-               vm_fault_return_t       result;
-               int                     pg_num;
+               vm_fault_return_t       result;
+
+               dwp->dw_mask = 0;
 
+               if (fast_path_full_req) {
+                       /*
+                        * if we get here, it means that we ran into a page
+                        * state we couldn't handle in the fast path and
+                        * bailed out to the slow path... since the order
+                        * we look at pages is different between the 2 paths,
+                        * the following check is needed to determine whether
+                        * this page was already processed in the fast path
+                        */
+                       if (lite_list[entry >> 5] & (1 << (entry & 31))) {
+                               goto skip_page;
+                       }
+               }
                dst_page = vm_page_lookup(object, dst_offset);
 
-               /*
-                * ENCRYPTED SWAP:
-                * If the page is encrypted, we need to decrypt it,
-                * so force a soft page fault.
-                */
-               if ((dst_page == VM_PAGE_NULL) || (dst_page->busy) ||
-                   (dst_page->encrypted) ||
-                   (dst_page->unusual && (dst_page->error || 
-                                          dst_page->restart ||
-                                          dst_page->absent ||
-                                          dst_page->fictitious))) {
-
-                  do {
-                       vm_page_t       top_page;
-                       kern_return_t   error_code;
-                       int             interruptible;
-
-                       if (delayed_unlock) {
-                               delayed_unlock = 0;
-                               vm_page_unlock_queues();
-                       }
-                       if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
-                               interruptible = THREAD_ABORTSAFE;
-                       else
-                               interruptible = THREAD_UNINT;
-
-                       fault_info.interruptible = interruptible;
-                       fault_info.cluster_size = xfer_size;
-
-                       result = vm_fault_page(object, dst_offset,
-                                              prot | VM_PROT_WRITE, FALSE, 
-                                              &prot, &dst_page, &top_page,
-                                              (int *)0,
-                                              &error_code, no_zero_fill,
-                                              FALSE, &fault_info);
-
-                       switch (result) {
-
-                       case VM_FAULT_SUCCESS:
-
-                               PAGE_WAKEUP_DONE(dst_page);
-                               /*
-                                *      Release paging references and
-                                *      top-level placeholder page, if any.
-                                */
-                               if (top_page != VM_PAGE_NULL) {
-                                       vm_object_t local_object;
+               if (dst_page == VM_PAGE_NULL ||
+                   dst_page->vmp_busy ||
+                   dst_page->vmp_error ||
+                   dst_page->vmp_restart ||
+                   dst_page->vmp_absent ||
+                   dst_page->vmp_fictitious) {
+                       if (object == kernel_object) {
+                               panic("vm_object_iopl_request: missing/bad page in kernel object\n");
+                       }
+                       if (object == compressor_object) {
+                               panic("vm_object_iopl_request: missing/bad page in compressor object\n");
+                       }
+
+                       if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
+                               ret = KERN_MEMORY_ERROR;
+                               goto return_err;
+                       }
+                       set_cache_attr_needed = TRUE;
+
+                       /*
+                        * We just looked up the page and the result remains valid
+                        * until the object lock is release, so send it to
+                        * until the object lock is released, so send it to
+                        * look it up again there.
+                        */
+                       caller_lookup = TRUE;
+
+                       do {
+                               vm_page_t       top_page;
+                               kern_return_t   error_code;
+
+                               fault_info.cluster_size = xfer_size;
 
-                                       local_object = top_page->object;
+                               vm_object_paging_begin(object);
+
+                               result = vm_fault_page(object, dst_offset,
+                                   prot | VM_PROT_WRITE, FALSE,
+                                   caller_lookup,
+                                   &prot, &dst_page, &top_page,
+                                   (int *)0,
+                                   &error_code, no_zero_fill,
+                                   FALSE, &fault_info);
+
+                               /* our lookup is no longer valid at this point */
+                               caller_lookup = FALSE;
+
+                               switch (result) {
+                               case VM_FAULT_SUCCESS:
+                                       page_grab_count++;
 
-                                       if (top_page->object != dst_page->object) {
-                                               vm_object_lock(local_object);
-                                               VM_PAGE_FREE(top_page);
-                                               vm_object_paging_end(local_object);
-                                               vm_object_unlock(local_object);
+                                       if (!dst_page->vmp_absent) {
+                                               PAGE_WAKEUP_DONE(dst_page);
                                        } else {
-                                               VM_PAGE_FREE(top_page);
-                                               vm_object_paging_end(local_object);
+                                               /*
+                                                * we only get back an absent page if we
+                                                * requested that it not be zero-filled
+                                                * because we are about to fill it via I/O
+                                                *
+                                                * absent pages should be left BUSY
+                                                * to prevent them from being faulted
+                                                * into an address space before we've
+                                                * had a chance to complete the I/O on
+                                                * them since they may contain info that
+                                                * shouldn't be seen by the faulting task
+                                                */
                                        }
-                               }
-                               break;
-                       
-                       case VM_FAULT_RETRY:
-                               vm_object_lock(object);
-                               vm_object_paging_begin(object);
-                               break;
+                                       /*
+                                        *      Release paging references and
+                                        *      top-level placeholder page, if any.
+                                        */
+                                       if (top_page != VM_PAGE_NULL) {
+                                               vm_object_t local_object;
 
-                       case VM_FAULT_FICTITIOUS_SHORTAGE:
-                               vm_page_more_fictitious();
+                                               local_object = VM_PAGE_OBJECT(top_page);
 
-                               vm_object_lock(object);
-                               vm_object_paging_begin(object);
-                               break;
+                                               /*
+                                                * comparing 2 packed pointers
+                                                */
+                                               if (top_page->vmp_object != dst_page->vmp_object) {
+                                                       vm_object_lock(local_object);
+                                                       VM_PAGE_FREE(top_page);
+                                                       vm_object_paging_end(local_object);
+                                                       vm_object_unlock(local_object);
+                                               } else {
+                                                       VM_PAGE_FREE(top_page);
+                                                       vm_object_paging_end(local_object);
+                                               }
+                                       }
+                                       vm_object_paging_end(object);
+                                       break;
 
-                       case VM_FAULT_MEMORY_SHORTAGE:
-                               if (vm_page_wait(interruptible)) {
+                               case VM_FAULT_RETRY:
                                        vm_object_lock(object);
-                                       vm_object_paging_begin(object);
                                        break;
+
+                               case VM_FAULT_MEMORY_SHORTAGE:
+                                       OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
+
+                                       VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
+
+                                       if (vm_page_wait(interruptible)) {
+                                               OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
+
+                                               VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
+                                               vm_object_lock(object);
+
+                                               break;
+                                       }
+                                       OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
+
+                                       VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
+
+                                       OS_FALLTHROUGH;
+
+                               case VM_FAULT_INTERRUPTED:
+                                       error_code = MACH_SEND_INTERRUPTED;
+                                       OS_FALLTHROUGH;
+                               case VM_FAULT_MEMORY_ERROR:
+memory_error:
+                                       ret = (error_code ? error_code: KERN_MEMORY_ERROR);
+
+                                       vm_object_lock(object);
+                                       goto return_err;
+
+                               case VM_FAULT_SUCCESS_NO_VM_PAGE:
+                                       /* success but no page: fail */
+                                       vm_object_paging_end(object);
+                                       vm_object_unlock(object);
+                                       goto memory_error;
+
+                               default:
+                                       panic("vm_object_iopl_request: unexpected error"
+                                           " 0x%x from vm_fault_page()\n", result);
                                }
-                               /* fall thru */
+                       } while (result != VM_FAULT_SUCCESS);
+               }
+               phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
 
-                       case VM_FAULT_INTERRUPTED:
-                               error_code = MACH_SEND_INTERRUPTED;
-                       case VM_FAULT_MEMORY_ERROR:
-                               ret = (error_code ? error_code: KERN_MEMORY_ERROR);
+               if (upl->flags & UPL_KERNEL_OBJECT) {
+                       goto record_phys_addr;
+               }
 
-                               vm_object_lock(object);
-                               vm_object_paging_begin(object);
-                               goto return_err;
-                       }
-                  } while (result != VM_FAULT_SUCCESS);
+               if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+                       dst_page->vmp_busy = TRUE;
+                       goto record_phys_addr;
                }
 
-               if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
-                    dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
-                       vm_page_t       low_page;
-                       int             refmod;
+               if (dst_page->vmp_cleaning) {
+                       /*
+                        * Someone else is cleaning this page in place.
+                        * In theory, we should be able to proceed and use this
+                        * page but they'll probably end up clearing the "busy"
+                        * bit on it in upl_commit_range() but they didn't set
+                        * it, so they would clear our "busy" bit and open
+                        * us to race conditions.
+                        * We'd better wait for the cleaning to complete and
+                        * then try again.
+                        */
+                       VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1);
+                       PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+                       continue;
+               }
+               if (dst_page->vmp_laundry) {
+                       vm_pageout_steal_laundry(dst_page, FALSE);
+               }
+
+               if ((cntrl_flags & UPL_NEED_32BIT_ADDR) &&
+                   phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) {
+                       vm_page_t       low_page;
+                       int             refmod;
 
                        /*
                         * support devices that can't DMA above 32 bits
@@ -4988,18 +9418,14 @@ vm_object_iopl_request(
                         * we don't know whether that physical address has been
                         * handed out to some other 64 bit capable DMA device to use
                         */
-                       if (dst_page->wire_count) {
-                               ret = KERN_PROTECTION_FAILURE;
+                       if (VM_PAGE_WIRED(dst_page)) {
+                               ret = KERN_PROTECTION_FAILURE;
                                goto return_err;
                        }
-                       if (delayed_unlock) {
-                               delayed_unlock = 0;
-                               vm_page_unlock_queues();
-                       }
                        low_page = vm_page_grablo();
 
                        if (low_page == VM_PAGE_NULL) {
-                               ret = KERN_RESOURCE_SHORTAGE;
+                               ret = KERN_RESOURCE_SHORTAGE;
                                goto return_err;
                        }
                        /*
@@ -5009,27 +9435,28 @@ vm_object_iopl_request(
                         * it after we disconnect it... we want the fault
                         * to find the new page being substituted.
                         */
-                       if (dst_page->pmapped)
-                               refmod = pmap_disconnect(dst_page->phys_page);
-                       else
-                               refmod = 0;
-                       vm_page_copy(dst_page, low_page);
-                 
-                       low_page->reference = dst_page->reference;
-                       low_page->dirty     = dst_page->dirty;
-
-                       if (refmod & VM_MEM_REFERENCED)
-                               low_page->reference = TRUE;
-                       if (refmod & VM_MEM_MODIFIED)
-                               low_page->dirty = TRUE;
+                       if (dst_page->vmp_pmapped) {
+                               refmod = pmap_disconnect(phys_page);
+                       } else {
+                               refmod = 0;
+                       }
+
+                       if (!dst_page->vmp_absent) {
+                               vm_page_copy(dst_page, low_page);
+                       }
+
+                       low_page->vmp_reference = dst_page->vmp_reference;
+                       low_page->vmp_dirty     = dst_page->vmp_dirty;
+                       low_page->vmp_absent    = dst_page->vmp_absent;
+
+                       if (refmod & VM_MEM_REFERENCED) {
+                               low_page->vmp_reference = TRUE;
+                       }
+                       if (refmod & VM_MEM_MODIFIED) {
+                               SET_PAGE_DIRTY(low_page, FALSE);
+                       }
 
-                       vm_page_lock_queues();
                        vm_page_replace(low_page, object, dst_offset);
-                       /*
-                        * keep the queue lock since we're going to 
-                        * need it immediately
-                        */
-                       delayed_unlock = 1;
 
                        dst_page = low_page;
                        /*
@@ -5037,74 +9464,129 @@ vm_object_iopl_request(
                         * BUSY... we don't need a PAGE_WAKEUP_DONE
                         * here, because we've never dropped the object lock
                         */
-                       dst_page->busy = FALSE;
-               }
-               if (delayed_unlock == 0)
-                       vm_page_lock_queues();
+                       if (!dst_page->vmp_absent) {
+                               dst_page->vmp_busy = FALSE;
+                       }
 
-               vm_page_wire(dst_page);
+                       phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+               }
+               if (!dst_page->vmp_busy) {
+                       dwp->dw_mask |= DW_vm_page_wire;
+               }
 
                if (cntrl_flags & UPL_BLOCK_ACCESS) {
                        /*
                         * Mark the page "busy" to block any future page fault
-                        * on this page.  We'll also remove the mapping
+                        * on this page in addition to wiring it.
+                        * We'll also remove the mapping
                         * of all these pages before leaving this routine.
                         */
-                       assert(!dst_page->fictitious);
-                       dst_page->busy = TRUE;
+                       assert(!dst_page->vmp_fictitious);
+                       dst_page->vmp_busy = TRUE;
                }
-               pg_num = (dst_offset-offset)/PAGE_SIZE;
-               lite_list[pg_num>>5] |= 1 << (pg_num & 31);
-
                /*
                 * expect the page to be used
                 * page queues lock must be held to set 'reference'
                 */
-               dst_page->reference = TRUE;
+               dwp->dw_mask |= DW_set_reference;
 
-               if (!(cntrl_flags & UPL_COPYOUT_FROM))
-                       dst_page->dirty = TRUE;
+               if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
+                       SET_PAGE_DIRTY(dst_page, TRUE);
+                       /*
+                        * Page belonging to a code-signed object is about to
+                        * be written. Mark it tainted and disconnect it from
+                        * all pmaps so processes have to fault it back in and
+                        * deal with the tainted bit.
+                        */
+                       if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
+                               dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
+                               vm_page_iopl_tainted++;
+                               if (dst_page->vmp_pmapped) {
+                                       int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+                                       if (refmod & VM_MEM_REFERENCED) {
+                                               dst_page->vmp_reference = TRUE;
+                                       }
+                               }
+                       }
+               }
+               if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
+                       pmap_sync_page_attributes_phys(phys_page);
+                       dst_page->vmp_written_by_kernel = FALSE;
+               }
+
+record_phys_addr:
+               if (dst_page->vmp_busy) {
+                       upl->flags |= UPL_HAS_BUSY;
+               }
 
-               if (dst_page->phys_page > upl->highest_page)
-                       upl->highest_page = dst_page->phys_page;
+               lite_list[entry >> 5] |= 1U << (entry & 31);
 
-               if (user_page_list) {
-                       user_page_list[entry].phys_addr = dst_page->phys_page;
-                       user_page_list[entry].pageout   = dst_page->pageout;
-                       user_page_list[entry].absent    = dst_page->absent;
-                       user_page_list[entry].dirty     = dst_page->dirty;
-                       user_page_list[entry].precious  = dst_page->precious;
-                       user_page_list[entry].device    = FALSE;
-                       if (dst_page->clustered == TRUE)
-                               user_page_list[entry].speculative = dst_page->speculative;
-                       else
-                               user_page_list[entry].speculative = FALSE;
-                       user_page_list[entry].cs_validated = dst_page->cs_validated;
-                       user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+               if (phys_page > upl->highest_page) {
+                       upl->highest_page = phys_page;
                }
-               /*
-                * someone is explicitly grabbing this page...
-                * update clustered and speculative state
-                * 
-                */
-               VM_PAGE_CONSUME_CLUSTERED(dst_page);
 
-               if (delayed_unlock++ > UPL_DELAYED_UNLOCK_LIMIT) {
-                       mutex_yield(&vm_page_queue_lock);
-                       delayed_unlock = 1;
+               if (user_page_list) {
+                       user_page_list[entry].phys_addr = phys_page;
+                       user_page_list[entry].free_when_done    = dst_page->vmp_free_when_done;
+                       user_page_list[entry].absent    = dst_page->vmp_absent;
+                       user_page_list[entry].dirty     = dst_page->vmp_dirty;
+                       user_page_list[entry].precious  = dst_page->vmp_precious;
+                       user_page_list[entry].device    = FALSE;
+                       user_page_list[entry].needed    = FALSE;
+                       if (dst_page->vmp_clustered == TRUE) {
+                               user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
+                       } else {
+                               user_page_list[entry].speculative = FALSE;
+                       }
+                       user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
+                       user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
+                       user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
+                       user_page_list[entry].mark      = FALSE;
+               }
+               if (object != kernel_object && object != compressor_object) {
+                       /*
+                        * someone is explicitly grabbing this page...
+                        * update clustered and speculative state
+                        *
+                        */
+                       if (dst_page->vmp_clustered) {
+                               VM_PAGE_CONSUME_CLUSTERED(dst_page);
+                       }
                }
+skip_page:
                entry++;
                dst_offset += PAGE_SIZE_64;
                xfer_size -= PAGE_SIZE;
+
+               if (dwp->dw_mask) {
+                       VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
+
+                       if (dw_count >= dw_limit) {
+                               vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
+
+                               dwp = dwp_start;
+                               dw_count = 0;
+                       }
+               }
+       }
+       assert(entry == size_in_pages);
+
+       if (dw_count) {
+               vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
+               dwp = dwp_start;
+               dw_count = 0;
+       }
+finish:
+       if (user_page_list && set_cache_attr_needed == TRUE) {
+               vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
        }
-       if (delayed_unlock)
-               vm_page_unlock_queues();
 
        if (page_list_count != NULL) {
-               if (upl->flags & UPL_INTERNAL)
+               if (upl->flags & UPL_INTERNAL) {
                        *page_list_count = 0;
-               else if (*page_list_count > entry)
-                       *page_list_count = entry;
+               } else if (*page_list_count > size_in_pages) {
+                       *page_list_count = size_in_pages;
+               }
        }
        vm_object_unlock(object);
 
@@ -5116,46 +9598,120 @@ vm_object_iopl_request(
                 * can't be accessed without causing a page fault.
                 */
                vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
-                                      PMAP_NULL, 0, VM_PROT_NONE);
+                   PMAP_NULL,
+                   PAGE_SIZE,
+                   0, VM_PROT_NONE);
+               assert(!object->blocked_access);
+               object->blocked_access = TRUE;
+       }
+
+       VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
+#if DEVELOPMENT || DEBUG
+       if (task != NULL) {
+               ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
        }
+#endif /* DEVELOPMENT || DEBUG */
+
+       if (dwp_start && dwp_finish_ctx) {
+               vm_page_delayed_work_finish_ctx(dwp_start);
+               dwp_start = dwp = NULL;
+       }
+
        return KERN_SUCCESS;
 
 return_err:
-       if (delayed_unlock)
-               vm_page_unlock_queues();
+       dw_index = 0;
 
        for (; offset < dst_offset; offset += PAGE_SIZE) {
-               dst_page = vm_page_lookup(object, offset);
+               boolean_t need_unwire;
 
-               if (dst_page == VM_PAGE_NULL)
-                       panic("vm_object_iopl_request: Wired pages missing. \n");
+               dst_page = vm_page_lookup(object, offset);
 
-               vm_page_lockspin_queues();
-               vm_page_unwire(dst_page);
+               if (dst_page == VM_PAGE_NULL) {
+                       panic("vm_object_iopl_request: Wired page missing. \n");
+               }
+
+               /*
+                * if we've already processed this page in an earlier
+                * dw_do_work, we need to undo the wiring... we will
+                * leave the dirty and reference bits on if they
+                * were set, since we don't have a good way of knowing
+                * what the previous state was and we won't get here
+                * under any normal circumstances...  we will always
+                * clear BUSY and wakeup any waiters via vm_page_free
+                * or PAGE_WAKEUP_DONE
+                */
+               need_unwire = TRUE;
+
+               if (dw_count) {
+                       if ((dwp_start)[dw_index].dw_m == dst_page) {
+                               /*
+                                * still in the deferred work list
+                                * which means we haven't yet called
+                                * vm_page_wire on this page
+                                */
+                               need_unwire = FALSE;
+
+                               dw_index++;
+                               dw_count--;
+                       }
+               }
+               vm_page_lock_queues();
+
+               if (dst_page->vmp_absent || free_wired_pages == TRUE) {
+                       vm_page_free(dst_page);
+
+                       need_unwire = FALSE;
+               } else {
+                       if (need_unwire == TRUE) {
+                               vm_page_unwire(dst_page, TRUE);
+                       }
+
+                       PAGE_WAKEUP_DONE(dst_page);
+               }
                vm_page_unlock_queues();
 
-               VM_STAT_INCR(reactivations);
+               if (need_unwire == TRUE) {
+                       VM_STAT_INCR(reactivations);
+               }
+       }
+#if UPL_DEBUG
+       upl->upl_state = 2;
+#endif
+       if (!(upl->flags & UPL_KERNEL_OBJECT)) {
+               vm_object_activity_end(object);
+               vm_object_collapse(object, 0, TRUE);
        }
-       vm_object_paging_end(object);
        vm_object_unlock(object);
        upl_destroy(upl);
 
+       VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0);
+#if DEVELOPMENT || DEBUG
+       if (task != NULL) {
+               ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
+       if (dwp_start && dwp_finish_ctx) {
+               vm_page_delayed_work_finish_ctx(dwp_start);
+               dwp_start = dwp = NULL;
+       }
        return ret;
 }
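
The slow path of vm_object_iopl_request() above batches its page-queue updates: each loop iteration ORs work flags into a vm_page_delayed_work entry, the batch is flushed through vm_page_do_delayed_work() once dw_count reaches dw_limit, and a final flush after the loop drains the remainder. A stripped-down, user-space sketch of that accumulate-and-flush pattern, assuming stand-in types and a stand-in flush function rather than the kernel's:

    #include <stdio.h>

    #define DW_LIMIT 8                      /* stand-in for dw_limit */

    struct delayed_work {                   /* stand-in for vm_page_delayed_work */
            int             page_id;
            unsigned int    mask;           /* pretend work flags */
    };

    /* Stand-in for vm_page_do_delayed_work(): apply a whole batch at once,
     * so shared state is touched once per batch instead of once per page. */
    static void
    flush_batch(const struct delayed_work *batch, int count)
    {
            for (int i = 0; i < count; i++) {
                    printf("apply mask 0x%x to page %d\n",
                        batch[i].mask, batch[i].page_id);
            }
    }

    int
    main(void)
    {
            struct delayed_work     batch[DW_LIMIT];
            int                     dw_count = 0;

            for (int page = 0; page < 20; page++) {
                    unsigned int mask = 0x1 | 0x4;  /* pretend: wire + reference */

                    if (mask) {
                            batch[dw_count].page_id = page;
                            batch[dw_count].mask = mask;
                            if (++dw_count >= DW_LIMIT) {   /* batch full: flush */
                                    flush_batch(batch, dw_count);
                                    dw_count = 0;
                            }
                    }
            }
            if (dw_count) {                         /* final partial batch */
                    flush_batch(batch, dw_count);
            }
            return 0;
    }
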
 
 kern_return_t
 upl_transpose(
-       upl_t           upl1,
-       upl_t           upl2)
+       upl_t           upl1,
+       upl_t           upl2)
 {
-       kern_return_t           retval;
-       boolean_t               upls_locked;
-       vm_object_t             object1, object2;
+       kern_return_t           retval;
+       boolean_t               upls_locked;
+       vm_object_t             object1, object2;
 
-       if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2) {
+       if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) {
                return KERN_INVALID_ARGUMENT;
        }
-       
+
        upls_locked = FALSE;
 
        /*
@@ -5169,13 +9725,13 @@ upl_transpose(
                upl_lock(upl2);
                upl_lock(upl1);
        }
-       upls_locked = TRUE;     /* the UPLs will need to be unlocked */
+       upls_locked = TRUE;     /* the UPLs will need to be unlocked */
 
        object1 = upl1->map_object;
        object2 = upl2->map_object;
 
-       if (upl1->offset != 0 || upl2->offset != 0 ||
-           upl1->size != upl2->size) {
+       if (upl1->u_offset != 0 || upl2->u_offset != 0 ||
+           upl1->u_size != upl2->u_size) {
                /*
                 * We deal only with full objects, not subsets.
                 * That's because we exchange the entire backing store info
@@ -5190,22 +9746,39 @@ upl_transpose(
         * Transpose the VM objects' backing store.
         */
        retval = vm_object_transpose(object1, object2,
-                                    (vm_object_size_t) upl1->size);
+           upl_adjusted_size(upl1, PAGE_MASK));
 
        if (retval == KERN_SUCCESS) {
                /*
                 * Make each UPL point to the correct VM object, i.e. the
                 * object holding the pages that the UPL refers to...
                 */
-#ifdef UPL_DEBUG
-               queue_remove(&object1->uplq, upl1, upl_t, uplq);
-               queue_remove(&object2->uplq, upl2, upl_t, uplq);
+#if CONFIG_IOSCHED || UPL_DEBUG
+               if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
+                       vm_object_lock(object1);
+                       vm_object_lock(object2);
+               }
+               if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
+                       queue_remove(&object1->uplq, upl1, upl_t, uplq);
+               }
+               if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
+                       queue_remove(&object2->uplq, upl2, upl_t, uplq);
+               }
 #endif
                upl1->map_object = object2;
                upl2->map_object = object1;
-#ifdef UPL_DEBUG
-               queue_enter(&object1->uplq, upl2, upl_t, uplq);
-               queue_enter(&object2->uplq, upl1, upl_t, uplq);
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+               if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
+                       queue_enter(&object2->uplq, upl1, upl_t, uplq);
+               }
+               if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) {
+                       queue_enter(&object1->uplq, upl2, upl_t, uplq);
+               }
+               if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
+                       vm_object_unlock(object2);
+                       vm_object_unlock(object1);
+               }
 #endif
        }
 
@@ -5222,68 +9795,59 @@ done:
        return retval;
 }
 
+void
+upl_range_needed(
+       upl_t           upl,
+       int             index,
+       int             count)
+{
+       upl_page_info_t *user_page_list;
+       int             size_in_pages;
+
+       if (!(upl->flags & UPL_INTERNAL) || count <= 0) {
+               return;
+       }
+
+       size_in_pages = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
+
+       user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+
+       while (count-- && index < size_in_pages) {
+               user_page_list[index++].needed = TRUE;
+       }
+}
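
upl_range_needed() and the UPL_SET_INTERNAL path of vm_object_iopl_request() recover the per-page array the same way: for an internal UPL, the upl_page_info_t array (and, for lite UPLs, the bitmap after it) sits in the same allocation immediately after struct upl and is found by pointer arithmetic. A toy sketch of that header-plus-trailing-array layout, assuming invented toy_upl and toy_page_info types:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_page_info {                  /* stand-in for upl_page_info_t */
            uint32_t        phys_addr;
            int             needed;
    };

    struct toy_upl {                        /* stand-in for struct upl */
            int             flags;
            unsigned int    npages;
            /* per-page array follows in the same allocation */
    };

    static struct toy_upl *
    toy_upl_create(unsigned int npages)
    {
            /* one allocation: header plus trailing per-page array */
            struct toy_upl *upl = calloc(1, sizeof(struct toy_upl) +
                npages * sizeof(struct toy_page_info));

            if (upl != NULL) {
                    upl->npages = npages;
            }
            return upl;
    }

    static struct toy_page_info *
    toy_upl_page_list(struct toy_upl *upl)
    {
            /* same recovery as "(uintptr_t)upl + sizeof(struct upl)" above */
            return (struct toy_page_info *)((uintptr_t)upl + sizeof(struct toy_upl));
    }

    int
    main(void)
    {
            struct toy_upl *upl = toy_upl_create(4);

            if (upl == NULL) {
                    return 1;
            }
            toy_upl_page_list(upl)[2].needed = 1;   /* cf. upl_range_needed() */
            printf("page 2 needed = %d\n", toy_upl_page_list(upl)[2].needed);
            free(upl);
            return 0;
    }

Keeping everything in one allocation is what lets upl_range_needed() rebuild the page list from nothing but the upl pointer.
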
+
+
 /*
- * ENCRYPTED SWAP:
- *
- * Rationale:  the user might have some encrypted data on disk (via
- * FileVault or any other mechanism).  That data is then decrypted in
- * memory, which is safe as long as the machine is secure.  But that
- * decrypted data in memory could be paged out to disk by the default
- * pager.  The data would then be stored on disk in clear (not encrypted)
- * and it could be accessed by anyone who gets physical access to the
- * disk (if the laptop or the disk gets stolen for example).  This weakens
- * the security offered by FileVault.
- *
- * Solution:  the default pager will optionally request that all the
- * pages it gathers for pageout be encrypted, via the UPL interfaces,
- * before it sends this UPL to disk via the vnode_pageout() path.
- * 
- * Notes:
- * 
- * To avoid disrupting the VM LRU algorithms, we want to keep the
- * clean-in-place mechanisms, which allow us to send some extra pages to 
- * swap (clustering) without actually removing them from the user's
- * address space.  We don't want the user to unknowingly access encrypted
- * data, so we have to actually remove the encrypted pages from the page
- * table.  When the user accesses the data, the hardware will fail to
- * locate the virtual page in its page table and will trigger a page
- * fault.  We can then decrypt the page and enter it in the page table
- * again.  Whenever we allow the user to access the contents of a page,
- * we have to make sure it's not encrypted.
- *
- * 
- */
-/*
- * ENCRYPTED SWAP:
  * Reserve of virtual addresses in the kernel address space.
  * We need to map the physical pages in the kernel, so that we
- * can call the encryption/decryption routines with a kernel
+ * can call the code-signing or slide routines with a kernel
  * virtual address.  We keep this pool of pre-allocated kernel
  * virtual addresses so that we don't have to scan the kernel's
- * virtaul address space each time we need to encrypt or decrypt
+ * virtual address space each time we need to work with
  * a physical page.
- * It would be nice to be able to encrypt and decrypt in physical
- * mode but that might not always be more efficient...
  */
-decl_simple_lock_data(,vm_paging_lock)
-#define VM_PAGING_NUM_PAGES    64
+SIMPLE_LOCK_DECLARE(vm_paging_lock, 0);
+#define VM_PAGING_NUM_PAGES     64
 vm_map_offset_t vm_paging_base_address = 0;
-boolean_t      vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
-int            vm_paging_max_index = 0;
-int            vm_paging_page_waiter = 0;
-int            vm_paging_page_waiter_total = 0;
-unsigned long  vm_paging_no_kernel_page = 0;
-unsigned long  vm_paging_objects_mapped = 0;
-unsigned long  vm_paging_pages_mapped = 0;
-unsigned long  vm_paging_objects_mapped_slow = 0;
-unsigned long  vm_paging_pages_mapped_slow = 0;
-
+boolean_t       vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
+int             vm_paging_max_index = 0;
+int             vm_paging_page_waiter = 0;
+int             vm_paging_page_waiter_total = 0;
+
+unsigned long   vm_paging_no_kernel_page = 0;
+unsigned long   vm_paging_objects_mapped = 0;
+unsigned long   vm_paging_pages_mapped = 0;
+unsigned long   vm_paging_objects_mapped_slow = 0;
+unsigned long   vm_paging_pages_mapped_slow = 0;
+
+__startup_func
 void
 vm_paging_map_init(void)
 {
-       kern_return_t   kr;
-       vm_map_offset_t page_map_offset;
-       vm_map_entry_t  map_entry;
+       kern_return_t   kr;
+       vm_map_offset_t page_map_offset;
+       vm_map_entry_t  map_entry;
 
        assert(vm_paging_base_address == 0);
 
@@ -5293,17 +9857,21 @@ vm_paging_map_init(void)
         */
        page_map_offset = 0;
        kr = vm_map_find_space(kernel_map,
-                              &page_map_offset,
-                              VM_PAGING_NUM_PAGES * PAGE_SIZE,
-                              0,
-                              0,
-                              &map_entry);
+           &page_map_offset,
+           VM_PAGING_NUM_PAGES * PAGE_SIZE,
+           0,
+           0,
+           VM_MAP_KERNEL_FLAGS_NONE,
+           VM_KERN_MEMORY_NONE,
+           &map_entry);
        if (kr != KERN_SUCCESS) {
                panic("vm_paging_map_init: kernel_map full\n");
        }
-       map_entry->object.vm_object = kernel_object;
-       map_entry->offset =
-               page_map_offset - VM_MIN_KERNEL_ADDRESS;
+       VME_OBJECT_SET(map_entry, kernel_object);
+       VME_OFFSET_SET(map_entry, page_map_offset);
+       map_entry->protection = VM_PROT_NONE;
+       map_entry->max_protection = VM_PROT_NONE;
+       map_entry->permanent = TRUE;
        vm_object_reference(kernel_object);
        vm_map_unlock(kernel_map);
 
@@ -5312,45 +9880,50 @@ vm_paging_map_init(void)
 }
 
 /*
- * ENCRYPTED SWAP:
  * vm_paging_map_object:
  *     Maps part of a VM object's pages in the kernel
- *     virtual address space, using the pre-allocated
+ *      virtual address space, using the pre-allocated
  *     kernel virtual addresses, if possible.
  * Context:
- *     The VM object is locked.  This lock will get
- *     dropped and re-acquired though, so the caller
- *     must make sure the VM object is kept alive
+ *      The VM object is locked.  This lock will get
+ *      dropped and re-acquired though, so the caller
+ *      must make sure the VM object is kept alive
  *     (by holding a VM map that has a reference
- *     on it, for example, or taking an extra reference).
- *     The page should also be kept busy to prevent
+ *      on it, for example, or taking an extra reference).
+ *      The page should also be kept busy to prevent
  *     it from being reclaimed.
  */
 kern_return_t
 vm_paging_map_object(
-       vm_map_offset_t         *address,
-       vm_page_t               page,
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       vm_map_size_t           *size,
-       vm_prot_t               protection,
-       boolean_t               can_unlock_object)
+       vm_page_t               page,
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_prot_t               protection,
+       boolean_t               can_unlock_object,
+       vm_map_size_t           *size,          /* IN/OUT */
+       vm_map_offset_t         *address,       /* OUT */
+       boolean_t               *need_unmap)    /* OUT */
 {
-       kern_return_t           kr;
-       vm_map_offset_t         page_map_offset;
-       vm_map_size_t           map_size;
-       vm_object_offset_t      object_offset;
-       int                     i;
+       kern_return_t           kr;
+       vm_map_offset_t         page_map_offset;
+       vm_map_size_t           map_size;
+       vm_object_offset_t      object_offset;
+       int                     i;
 
-       
        if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
-               assert(page->busy);
+               /* use permanent 1-to-1 kernel mapping of physical memory ? */
+               *address = (vm_map_offset_t)
+                   phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT);
+               *need_unmap = FALSE;
+               return KERN_SUCCESS;
+
+               assert(page->vmp_busy);
                /*
                 * Use one of the pre-allocated kernel virtual addresses
                 * and just enter the VM page in the kernel address space
                 * at that virtual address.
                 */
-               simple_lock(&vm_paging_lock);
+               simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
 
                /*
                 * Try and find an available kernel virtual address
@@ -5361,8 +9934,8 @@ vm_paging_map_object(
                        for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
                                if (vm_paging_page_inuse[i] == FALSE) {
                                        page_map_offset =
-                                               vm_paging_base_address +
-                                               (i * PAGE_SIZE);
+                                           vm_paging_base_address +
+                                           (i * PAGE_SIZE);
                                        break;
                                }
                        }
@@ -5384,9 +9957,12 @@ vm_paging_map_object(
                         */
                        vm_paging_page_waiter_total++;
                        vm_paging_page_waiter++;
-                       thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
-                                                      &vm_paging_lock,
-                                                      THREAD_UNINT);
+                       kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
+                       if (kr == THREAD_WAITING) {
+                               simple_unlock(&vm_paging_lock);
+                               kr = thread_block(THREAD_CONTINUE_NULL);
+                               simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
+                       }
                        vm_paging_page_waiter--;
                        /* ... and try again */
                }
@@ -5402,27 +9978,31 @@ vm_paging_map_object(
                        vm_paging_page_inuse[i] = TRUE;
                        simple_unlock(&vm_paging_lock);
 
-                       if (page->pmapped == FALSE) {
-                               pmap_sync_page_data_phys(page->phys_page);
-                       }
-                       page->pmapped = TRUE;
+                       page->vmp_pmapped = TRUE;
 
                        /*
                         * Keep the VM object locked over the PMAP_ENTER
                         * and the actual use of the page by the kernel,
-                        * or this pmap mapping might get undone by a 
+                        * or this pmap mapping might get undone by a
                         * vm_object_pmap_protect() call...
                         */
                        PMAP_ENTER(kernel_pmap,
-                                  page_map_offset,
-                                  page,
-                                  protection,
-                                  ((int) page->object->wimg_bits &
-                                   VM_WIMG_MASK),
-                                  TRUE);
+                           page_map_offset,
+                           page,
+                           protection,
+                           VM_PROT_NONE,
+                           0,
+                           TRUE,
+                           kr);
+                       assert(kr == KERN_SUCCESS);
                        vm_paging_objects_mapped++;
-                       vm_paging_pages_mapped++; 
+                       vm_paging_pages_mapped++;
                        *address = page_map_offset;
+                       *need_unmap = TRUE;
+
+#if KASAN
+                       kasan_notify_address(page_map_offset, PAGE_SIZE);
+#endif
 
                        /* all done and mapped, ready to use ! */
                        return KERN_SUCCESS;
@@ -5437,36 +10017,43 @@ vm_paging_map_object(
                simple_unlock(&vm_paging_lock);
        }
 
-       if (! can_unlock_object) {
+       if (!can_unlock_object) {
+               *address = 0;
+               *size = 0;
+               *need_unmap = FALSE;
                return KERN_NOT_SUPPORTED;
        }
 
        object_offset = vm_object_trunc_page(offset);
-       map_size = vm_map_round_page(*size);
+       map_size = vm_map_round_page(*size,
+           VM_MAP_PAGE_MASK(kernel_map));
 
        /*
         * Try and map the required range of the object
         * in the kernel_map
         */
 
-       vm_object_reference_locked(object);     /* for the map entry */
+       vm_object_reference_locked(object);     /* for the map entry */
        vm_object_unlock(object);
 
        kr = vm_map_enter(kernel_map,
-                         address,
-                         map_size,
-                         0,
-                         VM_FLAGS_ANYWHERE,
-                         object,
-                         object_offset,
-                         FALSE,
-                         protection,
-                         VM_PROT_ALL,
-                         VM_INHERIT_NONE);
+           address,
+           map_size,
+           0,
+           VM_FLAGS_ANYWHERE,
+           VM_MAP_KERNEL_FLAGS_NONE,
+           VM_KERN_MEMORY_NONE,
+           object,
+           object_offset,
+           FALSE,
+           protection,
+           VM_PROT_ALL,
+           VM_INHERIT_NONE);
        if (kr != KERN_SUCCESS) {
                *address = 0;
                *size = 0;
-               vm_object_deallocate(object);   /* for the map entry */
+               *need_unmap = FALSE;
+               vm_object_deallocate(object);   /* for the map entry */
                vm_object_lock(object);
                return kr;
        }
@@ -5485,65 +10072,67 @@ vm_paging_map_object(
         */
 
        for (page_map_offset = 0;
-            map_size != 0;
-            map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
-               unsigned int    cache_attr;
-
+           map_size != 0;
+           map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
                page = vm_page_lookup(object, offset + page_map_offset);
                if (page == VM_PAGE_NULL) {
                        printf("vm_paging_map_object: no page !?");
                        vm_object_unlock(object);
                        kr = vm_map_remove(kernel_map, *address, *size,
-                                          VM_MAP_NO_FLAGS);
+                           VM_MAP_REMOVE_NO_FLAGS);
                        assert(kr == KERN_SUCCESS);
                        *address = 0;
                        *size = 0;
+                       *need_unmap = FALSE;
                        vm_object_lock(object);
                        return KERN_MEMORY_ERROR;
                }
-               if (page->pmapped == FALSE) {
-                       pmap_sync_page_data_phys(page->phys_page);
-               }
-               page->pmapped = TRUE;
-               cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
+               page->vmp_pmapped = TRUE;
 
-               //assert(pmap_verify_free(page->phys_page));
+               //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
                PMAP_ENTER(kernel_pmap,
-                          *address + page_map_offset,
-                          page,
-                          protection,
-                          cache_attr,
-                          TRUE);
+                   *address + page_map_offset,
+                   page,
+                   protection,
+                   VM_PROT_NONE,
+                   0,
+                   TRUE,
+                   kr);
+               assert(kr == KERN_SUCCESS);
+#if KASAN
+               kasan_notify_address(*address + page_map_offset, PAGE_SIZE);
+#endif
        }
-                          
+
        vm_paging_objects_mapped_slow++;
-       vm_paging_pages_mapped_slow += map_size / PAGE_SIZE_64;
+       vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
+
+       *need_unmap = TRUE;
 
        return KERN_SUCCESS;
 }
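As documented above, the caller of vm_paging_map_object() must keep the object alive across a possible unlock/relock, keep the page busy, and unmap afterwards only when *need_unmap comes back TRUE. A hedged sketch of such a caller follows, using the new signature shown in this diff; it is illustrative only and not compilable outside the kernel. The function name example_touch_page_contents is invented, and vmp_offset is assumed to be the page's offset field.

/*
 * Sketch of a caller honoring the contract above (caller already holds the
 * object lock and has the page busy; error handling trimmed).
 */
static void
example_touch_page_contents(vm_object_t object, vm_page_t page)
{
	vm_map_size_t   map_size = PAGE_SIZE;
	vm_map_offset_t kva = 0;
	boolean_t       need_unmap = FALSE;
	kern_return_t   kr;

	vm_object_paging_begin(object);     /* keep the object alive across unlock/relock */

	kr = vm_paging_map_object(page, object, page->vmp_offset,
	    VM_PROT_READ | VM_PROT_WRITE,
	    FALSE,                          /* can_unlock_object */
	    &map_size,                      /* IN/OUT */
	    &kva,                           /* OUT: kernel virtual address */
	    &need_unmap);                   /* OUT: FALSE on the physmap fast path */
	if (kr == KERN_SUCCESS) {
		/* ... access the page contents through kva ... */
		if (need_unmap) {
			vm_paging_unmap_object(object, kva, kva + map_size);
		}
	}
	vm_object_paging_end(object);
}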
 
 /*
- * ENCRYPTED SWAP:
  * vm_paging_unmap_object:
  *     Unmaps part of a VM object's pages from the kernel
- *     virtual address space.
+ *      virtual address space.
  * Context:
- *     The VM object is locked.  This lock will get
- *     dropped and re-acquired though.
+ *      The VM object is locked.  This lock will get
+ *      dropped and re-acquired though.
  */
 void
 vm_paging_unmap_object(
-       vm_object_t     object,
-       vm_map_offset_t start,
-       vm_map_offset_t end)
+       vm_object_t     object,
+       vm_map_offset_t start,
+       vm_map_offset_t end)
 {
-       kern_return_t   kr;
-       int             i;
+       kern_return_t   kr;
+       int             i;
 
        if ((vm_paging_base_address == 0) ||
            (start < vm_paging_base_address) ||
            (end > (vm_paging_base_address
-                    + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
+           + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
                /*
                 * We didn't use our pre-allocated pool of
                 * kernel virtual address.  Deallocate the
@@ -5552,7 +10141,8 @@ vm_paging_unmap_object(
                if (object != VM_OBJECT_NULL) {
                        vm_object_unlock(object);
                }
-               kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
+               kr = vm_map_remove(kernel_map, start, end,
+                   VM_MAP_REMOVE_NO_FLAGS);
                if (object != VM_OBJECT_NULL) {
                        vm_object_lock(object);
                }
@@ -5564,12 +10154,13 @@ vm_paging_unmap_object(
                 * for next time.
                 */
                assert(end - start == PAGE_SIZE);
-               i = (start - vm_paging_base_address) >> PAGE_SHIFT;
+               i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
+               assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
 
                /* undo the pmap mapping */
                pmap_remove(kernel_pmap, start, end);
 
-               simple_lock(&vm_paging_lock);
+               simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
                vm_paging_page_inuse[i] = FALSE;
                if (vm_paging_page_waiter) {
                        thread_wakeup(&vm_paging_page_waiter);
@@ -5578,527 +10169,358 @@ vm_paging_unmap_object(
        }
 }
 
-#if CRYPTO
-/*
- * Encryption data.
- * "iv" is the "initial vector".  Ideally, we want to
- * have a different one for each page we encrypt, so that
- * crackers can't find encryption patterns too easily.
- */
-#define SWAP_CRYPT_AES_KEY_SIZE        128     /* XXX 192 and 256 don't work ! */
-boolean_t              swap_crypt_ctx_initialized = FALSE;
-aes_32t                swap_crypt_key[8]; /* big enough for a 256 key */
-aes_ctx                        swap_crypt_ctx;
-const unsigned char    swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };
-
-#if DEBUG
-boolean_t              swap_crypt_ctx_tested = FALSE;
-unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
-unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
-unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
-#endif /* DEBUG */
-
-extern u_long random(void);
 
 /*
- * Initialize the encryption context: key and key size.
+ * page->vmp_object must be locked
  */
-void swap_crypt_ctx_initialize(void); /* forward */
 void
-swap_crypt_ctx_initialize(void)
+vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
 {
-       unsigned int    i;
+       if (!queues_locked) {
+               vm_page_lockspin_queues();
+       }
 
+       page->vmp_free_when_done = FALSE;
        /*
-        * No need for locking to protect swap_crypt_ctx_initialized
-        * because the first use of encryption will come from the
-        * pageout thread (we won't pagein before there's been a pageout)
-        * and there's only one pageout thread.
-        */
-       if (swap_crypt_ctx_initialized == FALSE) {
-               for (i = 0;
-                    i < (sizeof (swap_crypt_key) /
-                         sizeof (swap_crypt_key[0]));
-                    i++) {
-                       swap_crypt_key[i] = random();
-               }
-               aes_encrypt_key((const unsigned char *) swap_crypt_key,
-                               SWAP_CRYPT_AES_KEY_SIZE,
-                               &swap_crypt_ctx.encrypt);
-               aes_decrypt_key((const unsigned char *) swap_crypt_key,
-                               SWAP_CRYPT_AES_KEY_SIZE,
-                               &swap_crypt_ctx.decrypt);
-               swap_crypt_ctx_initialized = TRUE;
-       }
-
-#if DEBUG
-       /*
-        * Validate the encryption algorithms.
+        * need to drop the laundry count...
+        * we may also need to remove it
+        * from the I/O paging queue...
+        * vm_pageout_throttle_up handles both cases
+        *
+        * the laundry and pageout_queue flags are cleared...
         */
-       if (swap_crypt_ctx_tested == FALSE) {
-               /* initialize */
-               for (i = 0; i < 4096; i++) {
-                       swap_crypt_test_page_ref[i] = (char) i;
-               }
-               /* encrypt */
-               aes_encrypt_cbc(swap_crypt_test_page_ref,
-                               swap_crypt_null_iv,
-                               PAGE_SIZE / AES_BLOCK_SIZE,
-                               swap_crypt_test_page_encrypt,
-                               &swap_crypt_ctx.encrypt);
-               /* decrypt */
-               aes_decrypt_cbc(swap_crypt_test_page_encrypt,
-                               swap_crypt_null_iv,
-                               PAGE_SIZE / AES_BLOCK_SIZE,
-                               swap_crypt_test_page_decrypt,
-                               &swap_crypt_ctx.decrypt);
-               /* compare result with original */
-               for (i = 0; i < 4096; i ++) {
-                       if (swap_crypt_test_page_decrypt[i] !=
-                           swap_crypt_test_page_ref[i]) {
-                               panic("encryption test failed");
-                       }
-               }
-
-               /* encrypt again */
-               aes_encrypt_cbc(swap_crypt_test_page_decrypt,
-                               swap_crypt_null_iv,
-                               PAGE_SIZE / AES_BLOCK_SIZE,
-                               swap_crypt_test_page_decrypt,
-                               &swap_crypt_ctx.encrypt);
-               /* decrypt in place */
-               aes_decrypt_cbc(swap_crypt_test_page_decrypt,
-                               swap_crypt_null_iv,
-                               PAGE_SIZE / AES_BLOCK_SIZE,
-                               swap_crypt_test_page_decrypt,
-                               &swap_crypt_ctx.decrypt);
-               for (i = 0; i < 4096; i ++) {
-                       if (swap_crypt_test_page_decrypt[i] !=
-                           swap_crypt_test_page_ref[i]) {
-                               panic("in place encryption test failed");
-                       }
-               }
-
-               swap_crypt_ctx_tested = TRUE;
-       }
-#endif /* DEBUG */
+       vm_pageout_throttle_up(page);
+
+       if (!queues_locked) {
+               vm_page_unlock_queues();
+       }
 }
 
-/*
- * ENCRYPTED SWAP:
- * vm_page_encrypt:
- *     Encrypt the given page, for secure paging.
- *     The page might already be mapped at kernel virtual
- *     address "kernel_mapping_offset".  Otherwise, we need
- *     to map it.
- * 
- * Context:
- *     The page's object is locked, but this lock will be released
- *     and re-acquired.
- *     The page is busy and not accessible by users (not entered in any pmap).
- */
-void
-vm_page_encrypt(
-       vm_page_t       page,
-       vm_map_offset_t kernel_mapping_offset)
+upl_t
+vector_upl_create(vm_offset_t upl_offset)
 {
-       kern_return_t           kr;
-       vm_map_size_t           kernel_mapping_size;
-       vm_offset_t             kernel_vaddr;
-       union {
-               unsigned char   aes_iv[AES_BLOCK_SIZE];
-               struct {
-                       memory_object_t         pager_object;
-                       vm_object_offset_t      paging_offset;
-               } vm;
-       } encrypt_iv;
-
-       if (! vm_pages_encrypted) {
-               vm_pages_encrypted = TRUE;
-       }
-
-       assert(page->busy);
-       assert(page->dirty || page->precious);
-       
-       if (page->encrypted) {
-               /*
-                * Already encrypted: no need to do it again.
-                */
-               vm_page_encrypt_already_encrypted_counter++;
-               return;
+       int     vector_upl_size  = sizeof(struct _vector_upl);
+       int i = 0;
+       upl_t   upl;
+       vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
+
+       upl = upl_create(0, UPL_VECTOR, 0);
+       upl->vector_upl = vector_upl;
+       upl->u_offset = upl_offset;
+       vector_upl->size = 0;
+       vector_upl->offset = upl_offset;
+       vector_upl->invalid_upls = 0;
+       vector_upl->num_upls = 0;
+       vector_upl->pagelist = NULL;
+
+       for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
+               vector_upl->upl_iostates[i].size = 0;
+               vector_upl->upl_iostates[i].offset = 0;
        }
-       ASSERT_PAGE_DECRYPTED(page);
-
-       /*
-        * Take a paging-in-progress reference to keep the object
-        * alive even if we have to unlock it (in vm_paging_map_object()
-        * for example)...
-        */
-       vm_object_paging_begin(page->object);
+       return upl;
+}
 
-       if (kernel_mapping_offset == 0) {
-               /*
-                * The page hasn't already been mapped in kernel space
-                * by the caller.  Map it now, so that we can access
-                * its contents and encrypt them.
-                */
-               kernel_mapping_size = PAGE_SIZE;
-               kr = vm_paging_map_object(&kernel_mapping_offset,
-                                         page,
-                                         page->object,
-                                         page->offset,
-                                         &kernel_mapping_size,
-                                         VM_PROT_READ | VM_PROT_WRITE,
-                                         FALSE);
-               if (kr != KERN_SUCCESS) {
-                       panic("vm_page_encrypt: "
-                             "could not map page in kernel: 0x%x\n",
-                             kr);
+void
+vector_upl_deallocate(upl_t upl)
+{
+       if (upl) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if (vector_upl) {
+                       if (vector_upl->invalid_upls != vector_upl->num_upls) {
+                               panic("Deallocating non-empty Vectored UPL\n");
+                       }
+                       kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
+                       vector_upl->invalid_upls = 0;
+                       vector_upl->num_upls = 0;
+                       vector_upl->pagelist = NULL;
+                       vector_upl->size = 0;
+                       vector_upl->offset = 0;
+                       kfree(vector_upl, sizeof(struct _vector_upl));
+                       vector_upl = (vector_upl_t)0xfeedfeed;
+               } else {
+                       panic("vector_upl_deallocate was passed a non-vectored upl\n");
                }
        } else {
-               kernel_mapping_size = 0;
+               panic("vector_upl_deallocate was passed a NULL upl\n");
        }
-       kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
+}
 
-       if (swap_crypt_ctx_initialized == FALSE) {
-               swap_crypt_ctx_initialize();
+boolean_t
+vector_upl_is_valid(upl_t upl)
+{
+       if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) {
+                       return FALSE;
+               } else {
+                       return TRUE;
+               }
        }
-       assert(swap_crypt_ctx_initialized);
-
-       /*
-        * Prepare an "initial vector" for the encryption.
-        * We use the "pager" and the "paging_offset" for that
-        * page to obfuscate the encrypted data a bit more and
-        * prevent crackers from finding patterns that they could
-        * use to break the key.
-        */
-       bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
-       encrypt_iv.vm.pager_object = page->object->pager;
-       encrypt_iv.vm.paging_offset =
-               page->object->paging_offset + page->offset;
-
-       /* encrypt the "initial vector" */
-       aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
-                       swap_crypt_null_iv,
-                       1,
-                       &encrypt_iv.aes_iv[0],
-                       &swap_crypt_ctx.encrypt);
-                 
-       /*
-        * Encrypt the page.
-        */
-       aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
-                       &encrypt_iv.aes_iv[0],
-                       PAGE_SIZE / AES_BLOCK_SIZE,
-                       (unsigned char *) kernel_vaddr,
-                       &swap_crypt_ctx.encrypt);
+       return FALSE;
+}
 
-       vm_page_encrypt_counter++;
+boolean_t
+vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
+{
+       if (vector_upl_is_valid(upl)) {
+               vector_upl_t vector_upl = upl->vector_upl;
+
+               if (vector_upl) {
+                       if (subupl) {
+                               if (io_size) {
+                                       if (io_size < PAGE_SIZE) {
+                                               io_size = PAGE_SIZE;
+                                       }
+                                       subupl->vector_upl = (void*)vector_upl;
+                                       vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
+                                       vector_upl->size += io_size;
+                                       upl->u_size += io_size;
+                               } else {
+                                       uint32_t i = 0, invalid_upls = 0;
+                                       for (i = 0; i < vector_upl->num_upls; i++) {
+                                               if (vector_upl->upl_elems[i] == subupl) {
+                                                       break;
+                                               }
+                                       }
+                                       if (i == vector_upl->num_upls) {
+                                               panic("Trying to remove sub-upl when none exists");
+                                       }
 
-       /*
-        * Unmap the page from the kernel's address space,
-        * if we had to map it ourselves.  Otherwise, let
-        * the caller undo the mapping if needed.
-        */
-       if (kernel_mapping_size != 0) {
-               vm_paging_unmap_object(page->object,
-                                      kernel_mapping_offset,
-                                      kernel_mapping_offset + kernel_mapping_size);
+                                       vector_upl->upl_elems[i] = NULL;
+                                       invalid_upls = os_atomic_inc(&(vector_upl)->invalid_upls,
+                                           relaxed);
+                                       if (invalid_upls == vector_upl->num_upls) {
+                                               return TRUE;
+                                       } else {
+                                               return FALSE;
+                                       }
+                               }
+                       } else {
+                               panic("vector_upl_set_subupl was passed a NULL upl element\n");
+                       }
+               } else {
+                       panic("vector_upl_set_subupl was passed a non-vectored upl\n");
+               }
+       } else {
+               panic("vector_upl_set_subupl was passed a NULL upl\n");
        }
 
-       /*
-        * Clear the "reference" and "modified" bits.
-        * This should clean up any impact the encryption had
-        * on them.
-        * The page was kept busy and disconnected from all pmaps,
-        * so it can't have been referenced or modified from user
-        * space.
-        * The software bits will be reset later after the I/O
-        * has completed (in upl_commit_range()).
-        */
-       pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
-
-       page->encrypted = TRUE;
-
-       vm_object_paging_end(page->object);
+       return FALSE;
 }
 
-/*
- * ENCRYPTED SWAP:
- * vm_page_decrypt:
- *     Decrypt the given page.
- *     The page might already be mapped at kernel virtual
- *     address "kernel_mapping_offset".  Otherwise, we need
- *     to map it.
- *
- * Context:
- *     The page's VM object is locked but will be unlocked and relocked.
- *     The page is busy and not accessible by users (not entered in any pmap).
- */
 void
-vm_page_decrypt(
-       vm_page_t       page,
-       vm_map_offset_t kernel_mapping_offset)
+vector_upl_set_pagelist(upl_t upl)
 {
-       kern_return_t           kr;
-       vm_map_size_t           kernel_mapping_size;
-       vm_offset_t             kernel_vaddr;
-       union {
-               unsigned char   aes_iv[AES_BLOCK_SIZE];
-               struct {
-                       memory_object_t         pager_object;
-                       vm_object_offset_t      paging_offset;
-               } vm;
-       } decrypt_iv;
-
-       assert(page->busy);
-       assert(page->encrypted);
+       if (vector_upl_is_valid(upl)) {
+               uint32_t i = 0;
+               vector_upl_t vector_upl = upl->vector_upl;
 
-       /*
-        * Take a paging-in-progress reference to keep the object
-        * alive even if we have to unlock it (in vm_paging_map_object()
-        * for example)...
-        */
-       vm_object_paging_begin(page->object);
+               if (vector_upl) {
+                       vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;
 
-       if (kernel_mapping_offset == 0) {
-               /*
-                * The page hasn't already been mapped in kernel space
-                * by the caller.  Map it now, so that we can access
-                * its contents and decrypt them.
-                */
-               kernel_mapping_size = PAGE_SIZE;
-               kr = vm_paging_map_object(&kernel_mapping_offset,
-                                         page,
-                                         page->object,
-                                         page->offset,
-                                         &kernel_mapping_size,
-                                         VM_PROT_READ | VM_PROT_WRITE,
-                                         FALSE);
-               if (kr != KERN_SUCCESS) {
-                       panic("vm_page_decrypt: "
-                             "could not map page in kernel: 0x%x\n",
-                             kr);
+                       vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE));
+
+                       for (i = 0; i < vector_upl->num_upls; i++) {
+                               cur_upl_pagelist_size = sizeof(struct upl_page_info) * upl_adjusted_size(vector_upl->upl_elems[i], PAGE_MASK) / PAGE_SIZE;
+                               bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
+                               pagelist_size += cur_upl_pagelist_size;
+                               if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) {
+                                       upl->highest_page = vector_upl->upl_elems[i]->highest_page;
+                               }
+                       }
+                       assert( pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
+               } else {
+                       panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
                }
        } else {
-               kernel_mapping_size = 0;
+               panic("vector_upl_set_pagelist was passed a NULL upl\n");
        }
-       kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
-
-       assert(swap_crypt_ctx_initialized);
-
-       /*
-        * Prepare an "initial vector" for the decryption.
-        * It has to be the same as the "initial vector" we
-        * used to encrypt that page.
-        */
-       bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
-       decrypt_iv.vm.pager_object = page->object->pager;
-       decrypt_iv.vm.paging_offset =
-               page->object->paging_offset + page->offset;
-
-       /* encrypt the "initial vector" */
-       aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
-                       swap_crypt_null_iv,
-                       1,
-                       &decrypt_iv.aes_iv[0],
-                       &swap_crypt_ctx.encrypt);
-
-       /*
-        * Decrypt the page.
-        */
-       aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
-                       &decrypt_iv.aes_iv[0],
-                       PAGE_SIZE / AES_BLOCK_SIZE,
-                       (unsigned char *) kernel_vaddr,
-                       &swap_crypt_ctx.decrypt);
-       vm_page_decrypt_counter++;
+}
 
-       /*
-        * Unmap the page from the kernel's address space,
-        * if we had to map it ourselves.  Otherwise, let
-        * the caller undo the mapping if needed.
-        */
-       if (kernel_mapping_size != 0) {
-               vm_paging_unmap_object(page->object,
-                                      kernel_vaddr,
-                                      kernel_vaddr + PAGE_SIZE);
+upl_t
+vector_upl_subupl_byindex(upl_t upl, uint32_t index)
+{
+       if (vector_upl_is_valid(upl)) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if (vector_upl) {
+                       if (index < vector_upl->num_upls) {
+                               return vector_upl->upl_elems[index];
+                       }
+               } else {
+                       panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
+               }
        }
+       return NULL;
+}
 
-       /*
-        * After decryption, the page is actually clean.
-        * It was encrypted as part of paging, which "cleans"
-        * the "dirty" pages.
-        * Noone could access it after it was encrypted
-        * and the decryption doesn't count.
-        */
-       page->dirty = FALSE;
-       if (page->cs_validated && !page->cs_tainted) {
-               /*
-                * CODE SIGNING:
-                * This page is no longer dirty
-                * but could have been modified,
-                * so it will need to be
-                * re-validated.
-                */
-               page->cs_validated = FALSE;
-               vm_cs_validated_resets++;
+upl_t
+vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
+{
+       if (vector_upl_is_valid(upl)) {
+               uint32_t i = 0;
+               vector_upl_t vector_upl = upl->vector_upl;
+
+               if (vector_upl) {
+                       upl_t subupl = NULL;
+                       vector_upl_iostates_t subupl_state;
+
+                       for (i = 0; i < vector_upl->num_upls; i++) {
+                               subupl = vector_upl->upl_elems[i];
+                               subupl_state = vector_upl->upl_iostates[i];
+                               if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
+                                       /* We could have been passed an offset/size pair that belongs
+                                        * to an UPL element that has already been committed/aborted.
+                                        * If so, return NULL.
+                                        */
+                                       if (subupl == NULL) {
+                                               return NULL;
+                                       }
+                                       if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
+                                               *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
+                                               if (*upl_size > subupl_state.size) {
+                                                       *upl_size = subupl_state.size;
+                                               }
+                                       }
+                                       if (*upl_offset >= subupl_state.offset) {
+                                               *upl_offset -= subupl_state.offset;
+                                       } else if (i) {
+                                               panic("Vector UPL offset miscalculation\n");
+                                       }
+                                       return subupl;
+                               }
+                       }
+               } else {
+                       panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
+               }
        }
-       pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
-
-       page->encrypted = FALSE;
+       return NULL;
+}
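vector_upl_subupl_byoffset() above walks the per-element iostates laid out back to back, finds the element that covers the requested offset, clamps the size to that element, and rebases the offset to be element-relative. A small self-contained userspace model of that lookup arithmetic follows; the struct and function names are invented for illustration.

/*
 * Model of the sub-UPL lookup: given element I/O states, find the element
 * covering *offset, clamp *size to the element's end, and rebase *offset.
 */
#include <stdint.h>
#include <stdio.h>

struct iostate { uint32_t offset; uint32_t size; };

static int
find_element(const struct iostate *st, int n, uint32_t *offset, uint32_t *size)
{
	for (int i = 0; i < n; i++) {
		if (*offset <= st[i].offset + st[i].size - 1) {
			if (st[i].offset + st[i].size < *offset + *size) {
				*size = st[i].offset + st[i].size - *offset; /* clamp to element end */
			}
			if (*offset >= st[i].offset) {
				*offset -= st[i].offset;                     /* rebase to element */
			}
			return i;
		}
	}
	return -1;
}

int
main(void)
{
	struct iostate st[] = { { 0, 0x4000 }, { 0x4000, 0x2000 }, { 0x6000, 0x8000 } };
	uint32_t off = 0x5000, size = 0x3000;
	int i = find_element(st, 3, &off, &size);

	/* prints: element 1, local offset 0x1000, size 0x1000 */
	printf("element %d, local offset 0x%x, size 0x%x\n", i, off, size);
	return 0;
}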
 
-       /*
-        * We've just modified the page's contents via the data cache and part
-        * of the new contents might still be in the cache and not yet in RAM.
-        * Since the page is now available and might get gathered in a UPL to
-        * be part of a DMA transfer from a driver that expects the memory to
-        * be coherent at this point, we have to flush the data cache.
-        */
-       pmap_sync_page_attributes_phys(page->phys_page);
-       /*
-        * Since the page is not mapped yet, some code might assume that it
-        * doesn't need to invalidate the instruction cache when writing to
-        * that page.  That code relies on "pmapped" being FALSE, so that the
-        * caches get synchronized when the page is first mapped.
-        */
-       assert(pmap_verify_free(page->phys_page));
-       page->pmapped = FALSE;
-       page->wpmapped = FALSE;
+void
+vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
+{
+       *v_upl_submap = NULL;
 
-       vm_object_paging_end(page->object);
+       if (vector_upl_is_valid(upl)) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if (vector_upl) {
+                       *v_upl_submap = vector_upl->submap;
+                       *submap_dst_addr = vector_upl->submap_dst_addr;
+               } else {
+                       panic("vector_upl_get_submap was passed a non-vectored UPL\n");
+               }
+       } else {
+               panic("vector_upl_get_submap was passed a null UPL\n");
+       }
 }
 
-unsigned long upl_encrypt_upls = 0;
-unsigned long upl_encrypt_pages = 0;
-
-/*
- * ENCRYPTED SWAP:
- *
- * upl_encrypt:
- *     Encrypts all the pages in the UPL, within the specified range.
- *
- */
 void
-upl_encrypt(
-       upl_t                   upl,
-       upl_offset_t            crypt_offset,
-       upl_size_t              crypt_size)
+vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
 {
-       upl_size_t              upl_size;
-       upl_offset_t            upl_offset;
-       vm_object_t             upl_object;
-       vm_page_t               page;
-       vm_object_t             shadow_object;
-       vm_object_offset_t      shadow_offset;
-       vm_object_offset_t      paging_offset;
-       vm_object_offset_t      base_offset;
+       if (vector_upl_is_valid(upl)) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if (vector_upl) {
+                       vector_upl->submap = submap;
+                       vector_upl->submap_dst_addr = submap_dst_addr;
+               } else {
+                       panic("vector_upl_get_submap was passed a non-vectored UPL\n");
+               }
+       } else {
+               panic("vector_upl_get_submap was passed a NULL UPL\n");
+       }
+}
 
-       upl_encrypt_upls++;
-       upl_encrypt_pages += crypt_size / PAGE_SIZE;
+void
+vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
+{
+       if (vector_upl_is_valid(upl)) {
+               uint32_t i = 0;
+               vector_upl_t vector_upl = upl->vector_upl;
 
-       upl_object = upl->map_object;
-       upl_offset = upl->offset;
-       upl_size = upl->size;
+               if (vector_upl) {
+                       for (i = 0; i < vector_upl->num_upls; i++) {
+                               if (vector_upl->upl_elems[i] == subupl) {
+                                       break;
+                               }
+                       }
 
-       vm_object_lock(upl_object);
+                       if (i == vector_upl->num_upls) {
+                               panic("setting sub-upl iostate when none exists");
+                       }
 
-       /*
-        * Find the VM object that contains the actual pages.
-        */
-       if (upl_object->pageout) {
-               shadow_object = upl_object->shadow;
-               /*
-                * The offset in the shadow object is actually also
-                * accounted for in upl->offset.  It possibly shouldn't be
-                * this way, but for now don't account for it twice.
-                */
-               shadow_offset = 0;
-               assert(upl_object->paging_offset == 0); /* XXX ? */
-               vm_object_lock(shadow_object);
+                       vector_upl->upl_iostates[i].offset = offset;
+                       if (size < PAGE_SIZE) {
+                               size = PAGE_SIZE;
+                       }
+                       vector_upl->upl_iostates[i].size = size;
+               } else {
+                       panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
+               }
        } else {
-               shadow_object = upl_object;
-               shadow_offset = 0;
+               panic("vector_upl_set_iostate was passed a NULL UPL\n");
        }
+}
 
-       paging_offset = shadow_object->paging_offset;
-       vm_object_paging_begin(shadow_object);
-
-       if (shadow_object != upl_object)
-               vm_object_unlock(upl_object);
-
-
-       base_offset = shadow_offset;
-       base_offset += upl_offset;
-       base_offset += crypt_offset;
-       base_offset -= paging_offset;
+void
+vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
+{
+       if (vector_upl_is_valid(upl)) {
+               uint32_t i = 0;
+               vector_upl_t vector_upl = upl->vector_upl;
 
-       assert(crypt_offset + crypt_size <= upl_size);
+               if (vector_upl) {
+                       for (i = 0; i < vector_upl->num_upls; i++) {
+                               if (vector_upl->upl_elems[i] == subupl) {
+                                       break;
+                               }
+                       }
 
-       for (upl_offset = 0;
-            upl_offset < crypt_size;
-            upl_offset += PAGE_SIZE) {
-               page = vm_page_lookup(shadow_object,
-                                     base_offset + upl_offset);
-               if (page == VM_PAGE_NULL) {
-                       panic("upl_encrypt: "
-                             "no page for (obj=%p,off=%lld+%d)!\n",
-                             shadow_object,
-                             base_offset,
-                             upl_offset);
-               }
-               /*
-                * Disconnect the page from all pmaps, so that nobody can
-                * access it while it's encrypted.  After that point, all
-                * accesses to this page will cause a page fault and block
-                * while the page is busy being encrypted.  After the
-                * encryption completes, any access will cause a
-                * page fault and the page gets decrypted at that time.
-                */
-               pmap_disconnect(page->phys_page);
-               vm_page_encrypt(page, 0);
+                       if (i == vector_upl->num_upls) {
+                               panic("getting sub-upl iostate when none exists");
+                       }
 
-               if (shadow_object == vm_pageout_scan_wants_object) {
-                       /*
-                        * Give vm_pageout_scan() a chance to convert more
-                        * pages from "clean-in-place" to "clean-and-free",
-                        * if it's interested in the same pages we selected
-                        * in this cluster.
-                        */
-                       vm_object_unlock(shadow_object);
-                       vm_object_lock(shadow_object);
+                       *offset = vector_upl->upl_iostates[i].offset;
+                       *size = vector_upl->upl_iostates[i].size;
+               } else {
+                       panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
                }
+       } else {
+               panic("vector_upl_get_iostate was passed a NULL UPL\n");
        }
-
-       vm_object_paging_end(shadow_object);
-       vm_object_unlock(shadow_object);
 }
 
-#else /* CRYPTO */
 void
-upl_encrypt(
-       __unused upl_t                  upl,
-       __unused upl_offset_t   crypt_offset,
-       __unused upl_size_t     crypt_size)
+vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
 {
+       if (vector_upl_is_valid(upl)) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if (vector_upl) {
+                       if (index < vector_upl->num_upls) {
+                               *offset = vector_upl->upl_iostates[index].offset;
+                               *size = vector_upl->upl_iostates[index].size;
+                       } else {
+                               *offset = *size = 0;
+                       }
+               } else {
+                       panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
+               }
+       } else {
+               panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
+       }
 }
 
-void
-vm_page_encrypt(
-       __unused vm_page_t              page,
-       __unused vm_map_offset_t        kernel_mapping_offset)
+upl_page_info_t *
+upl_get_internal_vectorupl_pagelist(upl_t upl)
 {
-} 
+       return ((vector_upl_t)(upl->vector_upl))->pagelist;
+}
 
-void
-vm_page_decrypt(
-       __unused vm_page_t              page,
-       __unused vm_map_offset_t        kernel_mapping_offset)
+void *
+upl_get_internal_vectorupl(upl_t upl)
 {
+       return upl->vector_upl;
 }
 
-#endif /* CRYPTO */
-
 vm_size_t
 upl_get_internal_pagelist_offset(void)
 {
@@ -6107,8 +10529,8 @@ upl_get_internal_pagelist_offset(void)
 
 void
 upl_clear_dirty(
-       upl_t           upl,
-       boolean_t       value)
+       upl_t           upl,
+       boolean_t       value)
 {
        if (value) {
                upl->flags |= UPL_CLEAR_DIRTY;
@@ -6117,34 +10539,101 @@ upl_clear_dirty(
        }
 }
 
+void
+upl_set_referenced(
+       upl_t           upl,
+       boolean_t       value)
+{
+       upl_lock(upl);
+       if (value) {
+               upl->ext_ref_count++;
+       } else {
+               if (!upl->ext_ref_count) {
+                       panic("upl_set_referenced not %p\n", upl);
+               }
+               upl->ext_ref_count--;
+       }
+       upl_unlock(upl);
+}
+
+#if CONFIG_IOSCHED
+void
+upl_set_blkno(
+       upl_t           upl,
+       vm_offset_t     upl_offset,
+       int             io_size,
+       int64_t         blkno)
+{
+       int i, j;
+       if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
+               return;
+       }
+
+       assert(upl->upl_reprio_info != 0);
+       for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
+               UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
+       }
+}
+#endif
+
+void inline
+memoryshot(unsigned int event, unsigned int control)
+{
+       if (vm_debug_events) {
+               KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
+                   vm_page_active_count, vm_page_inactive_count,
+                   vm_page_free_count, vm_page_speculative_count,
+                   vm_page_throttled_count);
+       } else {
+               (void) event;
+               (void) control;
+       }
+}
 
 #ifdef MACH_BSD
 
-boolean_t  upl_device_page(upl_page_info_t *upl)
+boolean_t
+upl_device_page(upl_page_info_t *upl)
+{
+       return UPL_DEVICE_PAGE(upl);
+}
+boolean_t
+upl_page_present(upl_page_info_t *upl, int index)
 {
-       return(UPL_DEVICE_PAGE(upl));
+       return UPL_PAGE_PRESENT(upl, index);
 }
-boolean_t  upl_page_present(upl_page_info_t *upl, int index)
+boolean_t
+upl_speculative_page(upl_page_info_t *upl, int index)
 {
-       return(UPL_PAGE_PRESENT(upl, index));
+       return UPL_SPECULATIVE_PAGE(upl, index);
 }
-boolean_t  upl_speculative_page(upl_page_info_t *upl, int index)
+boolean_t
+upl_dirty_page(upl_page_info_t *upl, int index)
 {
-       return(UPL_SPECULATIVE_PAGE(upl, index));
+       return UPL_DIRTY_PAGE(upl, index);
 }
-boolean_t  upl_dirty_page(upl_page_info_t *upl, int index)
+boolean_t
+upl_valid_page(upl_page_info_t *upl, int index)
 {
-       return(UPL_DIRTY_PAGE(upl, index));
+       return UPL_VALID_PAGE(upl, index);
 }
-boolean_t  upl_valid_page(upl_page_info_t *upl, int index)
+ppnum_t
+upl_phys_page(upl_page_info_t *upl, int index)
 {
-       return(UPL_VALID_PAGE(upl, index));
+       return UPL_PHYS_PAGE(upl, index);
 }
-ppnum_t  upl_phys_page(upl_page_info_t *upl, int index)
+
+void
+upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
 {
-       return(UPL_PHYS_PAGE(upl, index));
+       upl[index].mark = v;
 }
 
+boolean_t
+upl_page_get_mark(upl_page_info_t *upl, int index)
+{
+       return upl[index].mark;
+}
 
 void
 vm_countdirtypages(void)
@@ -6155,180 +10644,313 @@ vm_countdirtypages(void)
        int precpages;
 
 
-       dpages=0;
-       pgopages=0;
-       precpages=0;
+       dpages = 0;
+       pgopages = 0;
+       precpages = 0;
 
        vm_page_lock_queues();
-       m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+       m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
        do {
-               if (m ==(vm_page_t )0) break;
-
-               if(m->dirty) dpages++;
-               if(m->pageout) pgopages++;
-               if(m->precious) precpages++;
+               if (m == (vm_page_t)0) {
+                       break;
+               }
 
-               assert(m->object != kernel_object);
-               m = (vm_page_t) queue_next(&m->pageq);
-               if (m ==(vm_page_t )0) break;
+               if (m->vmp_dirty) {
+                       dpages++;
+               }
+               if (m->vmp_free_when_done) {
+                       pgopages++;
+               }
+               if (m->vmp_precious) {
+                       precpages++;
+               }
 
-       } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
+               assert(VM_PAGE_OBJECT(m) != kernel_object);
+               m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
+               if (m == (vm_page_t)0) {
+                       break;
+               }
+       } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
        vm_page_unlock_queues();
 
        vm_page_lock_queues();
-       m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+       m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
        do {
-               if (m ==(vm_page_t )0) break;
+               if (m == (vm_page_t)0) {
+                       break;
+               }
 
                dpages++;
-               assert(m->dirty);
-               assert(!m->pageout);
-               assert(m->object != kernel_object);
-               m = (vm_page_t) queue_next(&m->pageq);
-               if (m ==(vm_page_t )0) break;
-
-       } while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
+               assert(m->vmp_dirty);
+               assert(!m->vmp_free_when_done);
+               assert(VM_PAGE_OBJECT(m) != kernel_object);
+               m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
+               if (m == (vm_page_t)0) {
+                       break;
+               }
+       } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
        vm_page_unlock_queues();
 
        vm_page_lock_queues();
-       m = (vm_page_t) queue_first(&vm_page_queue_zf);
+       m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
        do {
-               if (m ==(vm_page_t )0) break;
-
-               if(m->dirty) dpages++;
-               if(m->pageout) pgopages++;
-               if(m->precious) precpages++;
+               if (m == (vm_page_t)0) {
+                       break;
+               }
 
-               assert(m->object != kernel_object);
-               m = (vm_page_t) queue_next(&m->pageq);
-               if (m ==(vm_page_t )0) break;
+               if (m->vmp_dirty) {
+                       dpages++;
+               }
+               if (m->vmp_free_when_done) {
+                       pgopages++;
+               }
+               if (m->vmp_precious) {
+                       precpages++;
+               }
 
-       } while (!queue_end(&vm_page_queue_zf,(queue_entry_t) m));
+               assert(VM_PAGE_OBJECT(m) != kernel_object);
+               m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
+               if (m == (vm_page_t)0) {
+                       break;
+               }
+       } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
        vm_page_unlock_queues();
 
        printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
 
-       dpages=0;
-       pgopages=0;
-       precpages=0;
+       dpages = 0;
+       pgopages = 0;
+       precpages = 0;
 
        vm_page_lock_queues();
-       m = (vm_page_t) queue_first(&vm_page_queue_active);
+       m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
 
        do {
-               if(m == (vm_page_t )0) break;
-               if(m->dirty) dpages++;
-               if(m->pageout) pgopages++;
-               if(m->precious) precpages++;
-
-               assert(m->object != kernel_object);
-               m = (vm_page_t) queue_next(&m->pageq);
-               if(m == (vm_page_t )0) break;
+               if (m == (vm_page_t)0) {
+                       break;
+               }
+               if (m->vmp_dirty) {
+                       dpages++;
+               }
+               if (m->vmp_free_when_done) {
+                       pgopages++;
+               }
+               if (m->vmp_precious) {
+                       precpages++;
+               }
 
-       } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
+               assert(VM_PAGE_OBJECT(m) != kernel_object);
+               m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
+               if (m == (vm_page_t)0) {
+                       break;
+               }
+       } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
        vm_page_unlock_queues();
 
        printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
-
 }
 #endif /* MACH_BSD */
 
-ppnum_t upl_get_highest_page(
-                            upl_t                      upl)
+
+#if CONFIG_IOSCHED
+int
+upl_get_cached_tier(upl_t  upl)
+{
+       assert(upl);
+       if (upl->flags & UPL_TRACKED_BY_OBJECT) {
+               return upl->upl_priority;
+       }
+       return -1;
+}
+#endif /* CONFIG_IOSCHED */
+
+
+void
+upl_callout_iodone(upl_t upl)
+{
+       struct upl_io_completion *upl_ctx = upl->upl_iodone;
+
+       if (upl_ctx) {
+               void    (*iodone_func)(void *, int) = upl_ctx->io_done;
+
+               assert(upl_ctx->io_done);
+
+               (*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
+       }
+}
+
+void
+upl_set_iodone(upl_t upl, void *upl_iodone)
+{
+       upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
+}
+
+void
+upl_set_iodone_error(upl_t upl, int error)
+{
+       struct upl_io_completion *upl_ctx = upl->upl_iodone;
+
+       if (upl_ctx) {
+               upl_ctx->io_error = error;
+       }
+}
+
+
+ppnum_t
+upl_get_highest_page(
+       upl_t                      upl)
+{
+       return upl->highest_page;
+}
+
+upl_size_t
+upl_get_size(
+       upl_t                      upl)
+{
+       return upl_adjusted_size(upl, PAGE_MASK);
+}
+
+upl_size_t
+upl_adjusted_size(
+       upl_t upl,
+       vm_map_offset_t pgmask)
+{
+       vm_object_offset_t start_offset, end_offset;
+
+       start_offset = trunc_page_mask_64(upl->u_offset, pgmask);
+       end_offset = round_page_mask_64(upl->u_offset + upl->u_size, pgmask);
+
+       return (upl_size_t)(end_offset - start_offset);
+}
+
+vm_object_offset_t
+upl_adjusted_offset(
+       upl_t upl,
+       vm_map_offset_t pgmask)
+{
+       return trunc_page_mask_64(upl->u_offset, pgmask);
+}
+
+vm_object_offset_t
+upl_get_data_offset(
+       upl_t upl)
+{
+       return upl->u_offset - upl_adjusted_offset(upl, PAGE_MASK);
+}
+
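
upl_adjusted_offset() truncates the UPL's raw byte offset down to a page boundary, upl_adjusted_size() rounds the covered [u_offset, u_offset + u_size) range out to whole pages, and upl_get_data_offset() returns the leftover sub-page offset of the data within that first page. A standalone sketch of the arithmetic, with local helpers standing in for trunc_page_mask_64()/round_page_mask_64() and an assumed 4 KB page mask:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the xnu page-mask rounding macros. */
static uint64_t trunc_mask_64(uint64_t x, uint64_t pgmask) { return x & ~pgmask; }
static uint64_t round_mask_64(uint64_t x, uint64_t pgmask) { return (x + pgmask) & ~pgmask; }

int main(void)
{
    const uint64_t pgmask   = 0xFFFULL;   /* assumed 4 KB pages         */
    const uint64_t u_offset = 0x1234;     /* unaligned UPL byte offset  */
    const uint64_t u_size   = 0x2000;     /* UPL byte size              */

    uint64_t adj_offset  = trunc_mask_64(u_offset, pgmask);             /* 0x1000 */
    uint64_t adj_size    = round_mask_64(u_offset + u_size, pgmask)
                           - adj_offset;                                /* 0x3000 */
    uint64_t data_offset = u_offset - adj_offset;                       /* 0x0234 */

    printf("adjusted offset 0x%llx, adjusted size 0x%llx, data offset 0x%llx\n",
           (unsigned long long)adj_offset,
           (unsigned long long)adj_size,
           (unsigned long long)data_offset);
    return 0;
}
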
+upl_t
+upl_associated_upl(upl_t upl)
 {
-        return upl->highest_page;
+       return upl->associated_upl;
+}
+
+void
+upl_set_associated_upl(upl_t upl, upl_t associated_upl)
+{
+       upl->associated_upl = associated_upl;
+}
+
+struct vnode *
+upl_lookup_vnode(upl_t upl)
+{
+       if (!upl->map_object->internal) {
+               return vnode_pager_lookup_vnode(upl->map_object->pager);
+       } else {
+               return NULL;
+       }
 }
 
-#ifdef UPL_DEBUG
-kern_return_t  upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
+#if UPL_DEBUG
+kern_return_t
+upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
 {
        upl->ubc_alias1 = alias1;
        upl->ubc_alias2 = alias2;
        return KERN_SUCCESS;
 }
-int  upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
+int
+upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
 {
-       if(al)
+       if (al) {
                *al = upl->ubc_alias1;
-       if(al2)
+       }
+       if (al2) {
                *al2 = upl->ubc_alias2;
+       }
        return KERN_SUCCESS;
 }
 #endif /* UPL_DEBUG */
 
+#if VM_PRESSURE_EVENTS
+/*
+ * Upward trajectory.
+ */
+extern boolean_t vm_compressor_low_on_space(void);
 
-
-#if    MACH_KDB
-#include <ddb/db_output.h>
-#include <ddb/db_print.h>
-#include <vm/vm_print.h>
-
-#define        printf  kdbprintf
-void           db_pageout(void);
-
-void
-db_vm(void)
+boolean_t
+VM_PRESSURE_NORMAL_TO_WARNING(void)
 {
-
-       iprintf("VM Statistics:\n");
-       db_indent += 2;
-       iprintf("pages:\n");
-       db_indent += 2;
-       iprintf("activ %5d  inact %5d  free  %5d",
-               vm_page_active_count, vm_page_inactive_count,
-               vm_page_free_count);
-       printf("   wire  %5d  gobbl %5d\n",
-              vm_page_wire_count, vm_page_gobble_count);
-       db_indent -= 2;
-       iprintf("target:\n");
-       db_indent += 2;
-       iprintf("min   %5d  inact %5d  free  %5d",
-               vm_page_free_min, vm_page_inactive_target,
-               vm_page_free_target);
-       printf("   resrv %5d\n", vm_page_free_reserved);
-       db_indent -= 2;
-       iprintf("pause:\n");
-       db_pageout();
-       db_indent -= 2;
+       if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+               /* Available pages below our threshold */
+               if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
+                       /* No frozen processes to kill */
+                       if (memorystatus_frozen_count == 0) {
+                               /* Not enough suspended processes available. */
+                               if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
+                                       return TRUE;
+                               }
+                       }
+               }
+               return FALSE;
+       } else {
+               return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
+       }
 }
 
-#if    MACH_COUNTERS
-extern int c_laundry_pages_freed;
-#endif /* MACH_COUNTERS */
+boolean_t
+VM_PRESSURE_WARNING_TO_CRITICAL(void)
+{
+       if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+               /* Available pages below our threshold */
+               if (memorystatus_available_pages < memorystatus_available_pages_critical) {
+                       return TRUE;
+               }
+               return FALSE;
+       } else {
+               return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
+       }
+}
 
-void
-db_pageout(void)
+/*
+ * Downward trajectory.
+ */
+boolean_t
+VM_PRESSURE_WARNING_TO_NORMAL(void)
 {
-       iprintf("Pageout Statistics:\n");
-       db_indent += 2;
-       iprintf("active %5d  inactv %5d\n",
-               vm_pageout_active, vm_pageout_inactive);
-       iprintf("nolock %5d  avoid  %5d  busy   %5d  absent %5d\n",
-               vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
-               vm_pageout_inactive_busy, vm_pageout_inactive_absent);
-       iprintf("used   %5d  clean  %5d  dirty  %5d\n",
-               vm_pageout_inactive_used, vm_pageout_inactive_clean,
-               vm_pageout_inactive_dirty);
-#if    MACH_COUNTERS
-       iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
-#endif /* MACH_COUNTERS */
-#if    MACH_CLUSTER_STATS
-       iprintf("Cluster Statistics:\n");
-       db_indent += 2;
-       iprintf("dirtied   %5d   cleaned  %5d   collisions  %5d\n",
-               vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
-               vm_pageout_cluster_collisions);
-       iprintf("clusters  %5d   conversions  %5d\n",
-               vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);
-       db_indent -= 2;
-       iprintf("Target Statistics:\n");
-       db_indent += 2;
-       iprintf("collisions   %5d   page_dirtied  %5d   page_freed  %5d\n",
-               vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
-               vm_pageout_target_page_freed);
-       db_indent -= 2;
-#endif /* MACH_CLUSTER_STATS */
-       db_indent -= 2;
+       if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+               /* Available pages above our threshold */
+               unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
+               if (memorystatus_available_pages > target_threshold) {
+                       return TRUE;
+               }
+               return FALSE;
+       } else {
+               return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
+       }
 }
 
-#endif /* MACH_KDB */
+boolean_t
+VM_PRESSURE_CRITICAL_TO_WARNING(void)
+{
+       if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+               /* Available pages above our threshold */
+               unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
+               if (memorystatus_available_pages > target_threshold) {
+                       return TRUE;
+               }
+               return FALSE;
+       } else {
+               return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
+       }
+}
+#endif /* VM_PRESSURE_EVENTS */
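
The four transition checks above are deliberately asymmetric: on the non-compressor path a level is entered as soon as memorystatus_available_pages drops below the corresponding threshold, but it is only left again once the count has climbed 15% above that same threshold, so the reported pressure level does not flap around the boundary. A standalone sketch of that hysteresis with made-up threshold values; it drops the frozen/suspended-process checks and the compressor-path comparisons that the real functions also perform:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical thresholds; the real values come from memorystatus tuning. */
static unsigned int pages_pressure = 10000;
static unsigned int pages_critical = 4000;

/* Enter a level below its threshold... */
static bool normal_to_warning(unsigned int avail)  { return avail < pages_pressure; }
static bool warning_to_critical(unsigned int avail){ return avail < pages_critical; }

/* ...but only leave it 15% above that threshold (cf. the 15/100 terms above). */
static bool warning_to_normal(unsigned int avail)
{
    return avail > pages_pressure + (15 * pages_pressure) / 100;
}
static bool critical_to_warning(unsigned int avail)
{
    return avail > pages_critical + (15 * pages_critical) / 100;
}

int main(void)
{
    /* 10500 pages: above the warning entry threshold but still inside the
     * hysteresis band, so a system already at warning stays there. */
    unsigned int avail = 10500;

    printf("enter warning:  %d\n", normal_to_warning(avail));   /* 0               */
    printf("leave warning:  %d\n", warning_to_normal(avail));   /* 0: needs > 11500 */
    printf("enter critical: %d\n", warning_to_critical(avail)); /* 0               */
    printf("leave critical: %d\n", critical_to_warning(avail)); /* 1: needs > 4600  */
    return 0;
}
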