diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c
index cc5e40edb2fed238f53daab863811682410c9a68..4098fb8bc2719cd109b43251501464f765af4b75 100644
--- a/osfmk/vm/vm_pageout.c
+++ b/osfmk/vm/vm_pageout.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  *
  *     The proverbial page-out daemon.
  */
-#ifdef MACH_BSD
-/* remove after component merge */
-extern int     vnode_pager_workaround;
-#endif
 
+#include <stdint.h>
+
+#include <debug.h>
 #include <mach_pagemap.h>
 #include <mach_cluster_stats.h>
 #include <mach_kdb.h>
@@ -69,35 +74,74 @@ extern int  vnode_pager_workaround;
 #include <mach/mach_types.h>
 #include <mach/memory_object.h>
 #include <mach/memory_object_default.h>
+#include <mach/memory_object_control_server.h>
 #include <mach/mach_host_server.h>
+#include <mach/upl.h>
+#include <mach/vm_map.h>
 #include <mach/vm_param.h>
 #include <mach/vm_statistics.h>
-#include <kern/host_statistics.h>
+#include <mach/sdt.h>
+
+#include <kern/kern_types.h>
 #include <kern/counters.h>
+#include <kern/host_statistics.h>
+#include <kern/machine.h>
+#include <kern/misc_protos.h>
+#include <kern/sched.h>
 #include <kern/thread.h>
-#include <kern/thread_swap.h>
 #include <kern/xpr.h>
+#include <kern/kalloc.h>
+
+#include <machine/vm_tuning.h>
+#include <machine/commpage.h>
+
+#if CONFIG_EMBEDDED
+#include <sys/kern_memorystatus.h>
+#endif
+
 #include <vm/pmap.h>
+#include <vm/vm_fault.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
-#include <machine/vm_tuning.h>
-#include <kern/misc_protos.h>
+#include <vm/vm_protos.h> /* must be last */
+#include <vm/memory_object.h>
+#include <vm/vm_purgeable_internal.h>
+
+/*
+ * ENCRYPTED SWAP:
+ */
+#include <../bsd/crypto/aes/aes.h>
+extern u_int32_t random(void); /* from <libkern/libkern.h> */
 
-extern ipc_port_t      memory_manager_default;
+#if UPL_DEBUG
+#include <libkern/OSDebug.h>
+#endif
 
-#ifndef        VM_PAGE_LAUNDRY_MAX
-#define        VM_PAGE_LAUNDRY_MAX     10      /* outstanding DMM page cleans */
-#endif /* VM_PAGEOUT_LAUNDRY_MAX */
+#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE   /* maximum iterations of the active queue to move pages to inactive */
+#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE  100
+#endif
+
+#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
+#ifdef CONFIG_EMBEDDED
+#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
+#else
+#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
+#endif
+#endif
+
+#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
+#define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
+#endif
 
-#ifndef        VM_PAGEOUT_BURST_MAX
-#define        VM_PAGEOUT_BURST_MAX    32      /* simultaneous EMM page cleans */
-#endif /* VM_PAGEOUT_BURST_MAX */
+#ifndef VM_PAGEOUT_INACTIVE_RELIEF
+#define VM_PAGEOUT_INACTIVE_RELIEF 50  /* minimum number of pages to move to the inactive q */
+#endif
 
-#ifndef        VM_PAGEOUT_DISCARD_MAX
-#define        VM_PAGEOUT_DISCARD_MAX  68      /* simultaneous EMM page cleans */
-#endif /* VM_PAGEOUT_DISCARD_MAX */
+#ifndef        VM_PAGE_LAUNDRY_MAX
+#define        VM_PAGE_LAUNDRY_MAX     16UL    /* maximum pageouts on a given pageout queue */
+#endif /* VM_PAGE_LAUNDRY_MAX */
 
 #ifndef        VM_PAGEOUT_BURST_WAIT
 #define        VM_PAGEOUT_BURST_WAIT   30      /* milliseconds per page */
@@ -107,6 +151,23 @@ extern ipc_port_t  memory_manager_default;
 #define VM_PAGEOUT_EMPTY_WAIT  200     /* milliseconds */
 #endif /* VM_PAGEOUT_EMPTY_WAIT */
 
+#ifndef        VM_PAGEOUT_DEADLOCK_WAIT
+#define VM_PAGEOUT_DEADLOCK_WAIT       300     /* milliseconds */
+#endif /* VM_PAGEOUT_DEADLOCK_WAIT */
+
+#ifndef        VM_PAGEOUT_IDLE_WAIT
+#define VM_PAGEOUT_IDLE_WAIT   10      /* milliseconds */
+#endif /* VM_PAGEOUT_IDLE_WAIT */
+
+#ifndef VM_PAGE_SPECULATIVE_TARGET
+#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / 20)
+#endif /* VM_PAGE_SPECULATIVE_TARGET */
+
+#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
+#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
+#endif /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
+
+
 /*
  *     To obtain a reasonable LRU approximation, the inactive queue
  *     needs to be large enough to give pages on it a chance to be
@@ -129,7 +190,11 @@ extern ipc_port_t  memory_manager_default;
  */
 
 #ifndef        VM_PAGE_FREE_TARGET
+#ifdef CONFIG_EMBEDDED
+#define        VM_PAGE_FREE_TARGET(free)       (15 + (free) / 100)
+#else
 #define        VM_PAGE_FREE_TARGET(free)       (15 + (free) / 80)
+#endif
 #endif /* VM_PAGE_FREE_TARGET */
 
 /*
@@ -138,9 +203,17 @@ extern ipc_port_t  memory_manager_default;
  */
 
 #ifndef        VM_PAGE_FREE_MIN
-#define        VM_PAGE_FREE_MIN(free)  (10 + (free) / 100)
+#ifdef CONFIG_EMBEDDED
+#define        VM_PAGE_FREE_MIN(free)          (10 + (free) / 200)
+#else
+#define        VM_PAGE_FREE_MIN(free)          (10 + (free) / 100)
+#endif
 #endif /* VM_PAGE_FREE_MIN */
 
+#define VM_PAGE_FREE_MIN_LIMIT         1500
+#define VM_PAGE_FREE_TARGET_LIMIT      2000
+
+
 /*
  *     When vm_page_free_count falls below vm_page_free_reserved,
  *     only vm-privileged threads can allocate pages.  vm-privilege
@@ -150,34 +223,78 @@ extern ipc_port_t memory_manager_default;
  */
 
 #ifndef        VM_PAGE_FREE_RESERVED
-#define        VM_PAGE_FREE_RESERVED   \
-       ((8 * VM_PAGE_LAUNDRY_MAX) + NCPUS)
+#define        VM_PAGE_FREE_RESERVED(n)        \
+       ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
 #endif /* VM_PAGE_FREE_RESERVED */
 
+/*
+ *     When we dequeue pages from the inactive list, they are
+ *     reactivated (ie, put back on the active queue) if referenced.
+ *     However, it is possible to starve the free list if other
+ *     processors are referencing pages faster than we can turn off
+ *     the referenced bit.  So we limit the number of reactivations
+ *     we will make per call of vm_pageout_scan().
+ */
+#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
+#ifndef        VM_PAGE_REACTIVATE_LIMIT
+#ifdef CONFIG_EMBEDDED
+#define        VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
+#else
+#define        VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
+#endif
+#endif /* VM_PAGE_REACTIVATE_LIMIT */
+#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM      100
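A minimal stand-alone sketch of the policy the comment above describes (illustrative only; REACTIVATE_LIMIT, struct page, and the queue contents are invented stand-ins, not XNU code):

#include <stdio.h>
#include <stdbool.h>

#define REACTIVATE_LIMIT 4   /* stand-in for VM_PAGE_REACTIVATE_LIMIT(avail) */

struct page { bool referenced; };

int main(void)
{
	struct page inactive_q[10] = {
		{true}, {true}, {false}, {true}, {true},
		{true}, {false}, {true}, {true}, {true},
	};
	int reactivated = 0, reclaimed = 0;

	for (int i = 0; i < 10; i++) {
		if (inactive_q[i].referenced && reactivated < REACTIVATE_LIMIT) {
			reactivated++;		/* back to the active queue */
			continue;
		}
		/* once the cap is hit, referenced pages are reclaimed anyway,
		 * so CPUs re-referencing pages cannot starve the free list */
		reclaimed++;
	}
	printf("reactivated %d, reclaimed %d\n", reactivated, reclaimed);
	return 0;
}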
+
+
+/*
+ * Exported variable used to broadcast the activation of the pageout scan.
+ * Working Set uses this to throttle its use of pmap removes.  In this
+ * way, code which runs within memory in an uncontested context does
+ * not keep encountering soft faults.
+ */
+
+unsigned int   vm_pageout_scan_event_counter = 0;
 
 /*
  * Forward declarations for internal routines.
  */
+
+static void vm_pageout_garbage_collect(int);
+static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
+static void vm_pageout_iothread_external(void);
+static void vm_pageout_iothread_internal(void);
+
 extern void vm_pageout_continue(void);
 extern void vm_pageout_scan(void);
-extern void vm_pageout_throttle(vm_page_t m);
-extern vm_page_t vm_pageout_cluster_page(
-                       vm_object_t             object,
-                       vm_object_offset_t      offset,
-                       boolean_t               precious_clean);
+
+static thread_t        vm_pageout_external_iothread = THREAD_NULL;
+static thread_t        vm_pageout_internal_iothread = THREAD_NULL;
 
 unsigned int vm_pageout_reserved_internal = 0;
 unsigned int vm_pageout_reserved_really = 0;
 
-unsigned int vm_page_laundry_max = 0;          /* # of clusters outstanding */
-unsigned int vm_page_laundry_min = 0;
-unsigned int vm_pageout_burst_max = 0;
-unsigned int vm_pageout_burst_wait = 0;                /* milliseconds per page */
+unsigned int vm_pageout_idle_wait = 0;         /* milliseconds */
 unsigned int vm_pageout_empty_wait = 0;                /* milliseconds */
-unsigned int vm_pageout_burst_min = 0;
-unsigned int vm_pageout_pause_count = 0;
-unsigned int vm_pageout_pause_max = 0;
-unsigned int vm_free_page_pause = 100;                 /* milliseconds */
+unsigned int vm_pageout_burst_wait = 0;                /* milliseconds */
+unsigned int vm_pageout_deadlock_wait = 0;     /* milliseconds */
+unsigned int vm_pageout_deadlock_relief = 0;
+unsigned int vm_pageout_inactive_relief = 0;
+unsigned int vm_pageout_burst_active_throttle = 0;
+unsigned int vm_pageout_burst_inactive_throttle = 0;
+
+/*
+ *     Protection against zero fill flushing live working sets derived
+ *     from existing backing store and files
+ */
+unsigned int vm_accellerate_zf_pageout_trigger = 400;
+unsigned int zf_queue_min_count = 100;
+unsigned int vm_zf_queue_count = 0;
+
+#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
+unsigned int vm_zf_count = 0;
+#else
+uint64_t vm_zf_count __attribute__((aligned(8))) = 0;
+#endif
 
 /*
  *     These variables record the pageout daemon's actions:
@@ -196,78 +313,87 @@ unsigned int vm_pageout_inactive_absent = 0;      /* debugging */
 unsigned int vm_pageout_inactive_used = 0;     /* debugging */
 unsigned int vm_pageout_inactive_clean = 0;    /* debugging */
 unsigned int vm_pageout_inactive_dirty = 0;    /* debugging */
+unsigned int vm_pageout_inactive_deactivated = 0;      /* debugging */
+unsigned int vm_pageout_inactive_zf = 0;       /* debugging */
 unsigned int vm_pageout_dirty_no_pager = 0;    /* debugging */
-unsigned int vm_pageout_inactive_pinned = 0;   /* debugging */
-unsigned int vm_pageout_inactive_limbo = 0;    /* debugging */
-unsigned int vm_pageout_setup_limbo = 0;       /* debugging */
-unsigned int vm_pageout_setup_unprepped = 0;   /* debugging */
+unsigned int vm_pageout_purged_objects = 0;    /* debugging */
 unsigned int vm_stat_discard = 0;              /* debugging */
 unsigned int vm_stat_discard_sent = 0;         /* debugging */
 unsigned int vm_stat_discard_failure = 0;      /* debugging */
 unsigned int vm_stat_discard_throttle = 0;     /* debugging */
-unsigned int vm_pageout_scan_active_emm_throttle = 0;          /* debugging */
-unsigned int vm_pageout_scan_active_emm_throttle_success = 0;  /* debugging */
-unsigned int vm_pageout_scan_active_emm_throttle_failure = 0;  /* debugging */
-unsigned int vm_pageout_scan_inactive_emm_throttle = 0;                /* debugging */
-unsigned int vm_pageout_scan_inactive_emm_throttle_success = 0;        /* debugging */
-unsigned int vm_pageout_scan_inactive_emm_throttle_failure = 0;        /* debugging */
+unsigned int vm_pageout_reactivation_limit_exceeded = 0;       /* debugging */
+unsigned int vm_pageout_catch_ups = 0;                         /* debugging */
+unsigned int vm_pageout_inactive_force_reclaim = 0;    /* debugging */
+
+unsigned int vm_pageout_scan_active_throttled = 0;
+unsigned int vm_pageout_scan_inactive_throttled = 0;
+unsigned int vm_pageout_scan_throttle = 0;                     /* debugging */
+unsigned int vm_pageout_scan_throttle_aborted = 0;             /* debugging */
+unsigned int vm_pageout_scan_burst_throttle = 0;               /* debugging */
+unsigned int vm_pageout_scan_empty_throttle = 0;               /* debugging */
+unsigned int vm_pageout_scan_deadlock_detected = 0;            /* debugging */
+unsigned int vm_pageout_scan_active_throttle_success = 0;      /* debugging */
+unsigned int vm_pageout_scan_inactive_throttle_success = 0;    /* debugging */
+
+unsigned int vm_page_speculative_count_drifts = 0;
+unsigned int vm_page_speculative_count_drift_max = 0;
 
+/*
+ * Backing store throttle when BS is exhausted
+ */
+unsigned int   vm_backing_store_low = 0;
 
 unsigned int vm_pageout_out_of_line  = 0;
 unsigned int vm_pageout_in_place  = 0;
+
+unsigned int vm_page_steal_pageout_page = 0;
+
 /*
- *     Routine:        vm_pageout_object_allocate
- *     Purpose:
- *             Allocate an object for use as out-of-line memory in a
- *             data_return/data_initialize message.
- *             The page must be in an unlocked object.
- *
- *             If the page belongs to a trusted pager, cleaning in place
- *             will be used, which utilizes a special "pageout object"
- *             containing private alias pages for the real page frames.
- *             Untrusted pagers use normal out-of-line memory.
+ * ENCRYPTED SWAP:
+ * counters and statistics...
  */
-vm_object_t
-vm_pageout_object_allocate(
-       vm_page_t               m,
-       vm_size_t               size,
-       vm_object_offset_t      offset)
-{
-       vm_object_t     object = m->object;
-       vm_object_t     new_object;
-
-       assert(object->pager_ready);
+unsigned long vm_page_decrypt_counter = 0;
+unsigned long vm_page_decrypt_for_upl_counter = 0;
+unsigned long vm_page_encrypt_counter = 0;
+unsigned long vm_page_encrypt_abort_counter = 0;
+unsigned long vm_page_encrypt_already_encrypted_counter = 0;
+boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
 
-       if (object->pager_trusted || object->internal)
-               vm_pageout_throttle(m);
+struct vm_pageout_queue vm_pageout_queue_internal;
+struct vm_pageout_queue vm_pageout_queue_external;
 
-       new_object = vm_object_allocate(size);
+unsigned int vm_page_speculative_target = 0;
 
-       if (object->pager_trusted) {
-               assert (offset < object->size);
+vm_object_t    vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
-               vm_object_lock(new_object);
-               new_object->pageout = TRUE;
-               new_object->shadow = object;
-               new_object->can_persist = FALSE;
-               new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
-               new_object->shadow_offset = offset;
-               vm_object_unlock(new_object);
+boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;
 
-               /*
-                * Take a paging reference on the object. This will be dropped
-                * in vm_pageout_object_terminate()
-                */
-               vm_object_lock(object);
-               vm_object_paging_begin(object);
-               vm_object_unlock(object);
+#if DEVELOPMENT || DEBUG
+unsigned long vm_cs_validated_resets = 0;
+#endif
 
-               vm_pageout_in_place++;
-       } else
-               vm_pageout_out_of_line++;
-       return(new_object);
+/*
+ *     Routine:        vm_backing_store_disable
+ *     Purpose:
+ *             Suspend non-privileged threads wishing to extend
+ *             backing store when we are low on backing store
+ *             (Synchronized by caller)
+ */
+void
+vm_backing_store_disable(
+       boolean_t       disable)
+{
+       if(disable) {
+               vm_backing_store_low = 1;
+       } else {
+               if(vm_backing_store_low) {
+                       vm_backing_store_low = 0;
+                       thread_wakeup((event_t) &vm_backing_store_low);
+               }
+       }
 }
 
+
 #if MACH_CLUSTER_STATS
 unsigned long vm_pageout_cluster_dirtied = 0;
 unsigned long vm_pageout_cluster_cleaned = 0;
@@ -277,8 +403,6 @@ unsigned long vm_pageout_cluster_conversions = 0;
 unsigned long vm_pageout_target_collisions = 0;
 unsigned long vm_pageout_target_page_dirtied = 0;
 unsigned long vm_pageout_target_page_freed = 0;
-unsigned long vm_pageout_target_page_pinned = 0;
-unsigned long vm_pageout_target_page_limbo = 0;
 #define CLUSTER_STAT(clause)   clause
 #else  /* MACH_CLUSTER_STATS */
 #define CLUSTER_STAT(clause)
@@ -287,8 +411,7 @@ unsigned long vm_pageout_target_page_limbo = 0;
 /* 
  *     Routine:        vm_pageout_object_terminate
  *     Purpose:
- *             Destroy the pageout_object allocated by
- *             vm_pageout_object_allocate(), and perform all of the
+ *             Destroy the pageout_object, and perform all of the
  *             required cleanup actions.
  * 
  *     In/Out conditions:
@@ -331,26 +454,23 @@ vm_pageout_object_terminate(
                if(m == VM_PAGE_NULL)
                        continue;
                assert(m->cleaning);
+               /* used as a trigger on upl_commit etc to recognize the */
+               /* pageout daemon's subsequent desire to pageout a cleaning */
+               /* page.  When the bit is on the upl commit code will   */
+               /* respect the pageout bit in the target page over the  */
+               /* caller's page list indication */
+               m->dump_cleaning = FALSE;
 
-               /*
-                * Account for the paging reference taken when
-                * m->cleaning was set on this page.
-                */
-               vm_object_paging_end(shadow_object);
                assert((m->dirty) || (m->precious) ||
                                (m->busy && m->cleaning));
 
                /*
                 * Handle the trusted pager throttle.
+                * Also decrement the burst throttle (if external).
                 */
                vm_page_lock_queues();
                if (m->laundry) {
-                   vm_page_laundry_count--;
-                   m->laundry = FALSE;
-                   if (vm_page_laundry_count < vm_page_laundry_min) {
-                       vm_page_laundry_min = 0;
-                       thread_wakeup((event_t) &vm_page_laundry_count);
-                   }
+                       vm_pageout_throttle_up(m);
                }
 
                /*
@@ -365,6 +485,7 @@ vm_pageout_object_terminate(
                        assert(m->busy);
                        assert(m->wire_count == 1);
                        m->cleaning = FALSE;
+                       m->encrypted_cleaning = FALSE;
                        m->pageout = FALSE;
 #if MACH_CLUSTER_STATS
                        if (m->wanted) vm_pageout_target_collisions++;
@@ -372,47 +493,23 @@ vm_pageout_object_terminate(
                        /*
                         * Revoke all access to the page. Since the object is
                         * locked, and the page is busy, this prevents the page
-                        * from being dirtied after the pmap_is_modified() call
+                        * from being dirtied after the pmap_disconnect() call
                         * returns.
-                        */
-                       pmap_page_protect(m->phys_addr, VM_PROT_NONE);
-
-                       /*
+                        *
                         * Since the page is left "dirty" but "not modified", we
                         * can detect whether the page was redirtied during
                         * pageout by checking the modify state.
                         */
-                       m->dirty = pmap_is_modified(m->phys_addr);
+                       if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
+                             m->dirty = TRUE;
+                       else
+                             m->dirty = FALSE;
 
                        if (m->dirty) {
                                CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
-                               vm_page_unwire(m);/* reactivates */
-                               VM_STAT(reactivations++);
+                               vm_page_unwire(m, TRUE);        /* reactivates */
+                               VM_STAT_INCR(reactivations);
                                PAGE_WAKEUP_DONE(m);
-                       } else if (m->prep_pin_count != 0) {
-                               vm_page_pin_lock();
-                               if (m->pin_count != 0) {
-                                       /* page is pinned; reactivate */
-                                       CLUSTER_STAT(
-                                           vm_pageout_target_page_pinned++;)
-                                       vm_page_unwire(m);/* reactivates */
-                                       VM_STAT(reactivations++);
-                                       PAGE_WAKEUP_DONE(m);
-                               } else {
-                                       /*
-                                        * page is prepped but not pinned; send
-                                        * it into limbo.  Note that
-                                        * vm_page_free (which will be called
-                                        * after releasing the pin lock) knows
-                                        * how to handle a page with limbo set.
-                                        */
-                                       m->limbo = TRUE;
-                                       CLUSTER_STAT(
-                                           vm_pageout_target_page_limbo++;)
-                               }
-                               vm_page_pin_unlock();
-                               if (m->limbo)
-                                       vm_page_free(m);
                        } else {
                                CLUSTER_STAT(vm_pageout_target_page_freed++;)
                                vm_page_free(m);/* clears busy, etc. */
@@ -426,8 +523,8 @@ vm_pageout_object_terminate(
                 * If prep_pin_count is nonzero, then someone is using the
                 * page, so make it active.
                 */
-               if (!m->active && !m->inactive) {
-                       if (m->reference || m->prep_pin_count != 0)
+               if (!m->active && !m->inactive && !m->throttled && !m->private) {
+                       if (m->reference)
                                vm_page_activate(m);
                        else
                                vm_page_deactivate(m);
@@ -439,26 +536,21 @@ vm_pageout_object_terminate(
 
                        /* We do not re-set m->dirty ! */
                        /* The page was busy so no extraneous activity     */
-                       /* could have occured. COPY_INTO is a read into the */
+                       /* could have occurred. COPY_INTO is a read into the */
                        /* new pages. CLEAN_IN_PLACE does actually write   */
                        /* out the pages but handling outside of this code */
                        /* will take care of resetting dirty. We clear the */
                        /* modify however for the Programmed I/O case.     */ 
-                       pmap_clear_modify(m->phys_addr);
-                       if(m->absent) {
-                               m->absent = FALSE;
-                               if(shadow_object->absent_count == 1)
-                                       vm_object_absent_release(shadow_object);
-                               else
-                                       shadow_object->absent_count--;
-                       }
+                       pmap_clear_modify(m->phys_page);
+
+                       m->absent = FALSE;
                        m->overwriting = FALSE;
                } else if (m->overwriting) {
                        /* alternate request page list, write to page_list */
                        /* case.  Occurs when the original page was wired  */
                        /* at the time of the list request */
-                       assert(m->wire_count != 0);
-                       vm_page_unwire(m);/* reactivates */
+                       assert(VM_PAGE_WIRED(m));
+                       vm_page_unwire(m, TRUE);        /* reactivates */
                        m->overwriting = FALSE;
                } else {
                /*
@@ -471,7 +563,7 @@ vm_pageout_object_terminate(
                 * consulted if m->dirty is false.
                 */
 #if MACH_CLUSTER_STATS
-                       m->dirty = pmap_is_modified(m->phys_addr);
+                       m->dirty = pmap_is_modified(m->phys_page);
 
                        if (m->dirty)   vm_pageout_cluster_dirtied++;
                        else            vm_pageout_cluster_cleaned++;
@@ -481,7 +573,7 @@ vm_pageout_object_terminate(
 #endif
                }
                m->cleaning = FALSE;
-
+               m->encrypted_cleaning = FALSE;
 
                /*
                 * Wakeup any thread waiting for the page to be un-cleaning.
@@ -492,228 +584,16 @@ vm_pageout_object_terminate(
        /*
         * Account for the paging reference taken in vm_paging_object_allocate.
         */
-       vm_object_paging_end(shadow_object);
+       vm_object_activity_end(shadow_object);
        vm_object_unlock(shadow_object);
 
        assert(object->ref_count == 0);
        assert(object->paging_in_progress == 0);
+       assert(object->activity_in_progress == 0);
        assert(object->resident_page_count == 0);
        return;
 }
 
-/*
- *     Routine:        vm_pageout_setup
- *     Purpose:
- *             Set up a page for pageout (clean & flush).
- *
- *             Move the page to a new object, as part of which it will be
- *             sent to its memory manager in a memory_object_data_write or
- *             memory_object_initialize message.
- *
- *             The "new_object" and "new_offset" arguments
- *             indicate where the page should be moved.
- *
- *     In/Out conditions:
- *             The page in question must not be on any pageout queues,
- *             and must be busy.  The object to which it belongs
- *             must be unlocked, and the caller must hold a paging
- *             reference to it.  The new_object must not be locked.
- *
- *             This routine returns a pointer to a place-holder page,
- *             inserted at the same offset, to block out-of-order
- *             requests for the page.  The place-holder page must
- *             be freed after the data_write or initialize message
- *             has been sent.
- *
- *             The original page is put on a paging queue and marked
- *             not busy on exit.
- */
-vm_page_t
-vm_pageout_setup(
-       register vm_page_t      m,
-       register vm_object_t    new_object,
-       vm_object_offset_t      new_offset)
-{
-       register vm_object_t    old_object = m->object;
-       vm_object_offset_t      paging_offset;
-       vm_object_offset_t      offset;
-       register vm_page_t      holding_page;
-       register vm_page_t      new_m;
-       register vm_page_t      new_page;
-       boolean_t               need_to_wire = FALSE;
-
-
-        XPR(XPR_VM_PAGEOUT,
-     "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
-                (integer_t)m->object, (integer_t)m->offset, 
-               (integer_t)m, (integer_t)new_object, 
-               (integer_t)new_offset);
-       assert(m && m->busy && !m->absent && !m->fictitious && !m->error &&
-               !m->restart);
-
-       assert(m->dirty || m->precious);
-
-       /*
-        *      Create a place-holder page where the old one was, to prevent
-        *      attempted pageins of this page while we're unlocked.
-        *      If the pageout daemon put this page in limbo and we're not
-        *      going to clean in place, get another fictitious page to
-        *      exchange for it now.
-        */
-       VM_PAGE_GRAB_FICTITIOUS(holding_page);
-
-       if (m->limbo)
-               VM_PAGE_GRAB_FICTITIOUS(new_page);
-
-       vm_object_lock(old_object);
-
-       offset = m->offset;
-       paging_offset = offset + old_object->paging_offset;
-
-       if (old_object->pager_trusted) {
-               /*
-                * This pager is trusted, so we can clean this page
-                * in place. Leave it in the old object, and mark it
-                * cleaning & pageout.
-                */
-               new_m = holding_page;
-               holding_page = VM_PAGE_NULL;
-
-               /*
-                * If the pageout daemon put this page in limbo, exchange the
-                * identities of the limbo page and the new fictitious page,
-                * and continue with the new page, unless the prep count has
-                * gone to zero in the meantime (which means no one is
-                * interested in the page any more).  In that case, just clear
-                * the limbo bit and free the extra fictitious page.
-                */
-               if (m->limbo) {
-                       if (m->prep_pin_count == 0) {
-                               /* page doesn't have to be in limbo any more */
-                               m->limbo = FALSE;
-                               vm_page_lock_queues();
-                               vm_page_free(new_page);
-                               vm_page_unlock_queues();
-                               vm_pageout_setup_unprepped++;
-                       } else {
-                               vm_page_lock_queues();
-                               VM_PAGE_QUEUES_REMOVE(m);
-                               vm_page_remove(m);
-                               vm_page_limbo_exchange(m, new_page);
-                               vm_pageout_setup_limbo++;
-                               vm_page_release_limbo(m);
-                               m = new_page;
-                               vm_page_insert(m, old_object, offset);
-                               vm_page_unlock_queues();
-                       }
-               }
-
-               /*
-                * Set up new page to be private shadow of real page.
-                */
-               new_m->phys_addr = m->phys_addr;
-               new_m->fictitious = FALSE;
-               new_m->private = TRUE;
-               new_m->pageout = TRUE;
-
-               /*
-                * Mark real page as cleaning (indicating that we hold a
-                * paging reference to be released via m_o_d_r_c) and
-                * pageout (indicating that the page should be freed
-                * when the pageout completes).
-                */
-               pmap_clear_modify(m->phys_addr);
-               vm_page_lock_queues();
-               vm_page_wire(new_m);
-               m->cleaning = TRUE;
-               m->pageout = TRUE;
-
-               vm_page_wire(m);
-               assert(m->wire_count == 1);
-               vm_page_unlock_queues();
-
-               m->dirty = TRUE;
-               m->precious = FALSE;
-               m->page_lock = VM_PROT_NONE;
-               m->unusual = FALSE;
-               m->unlock_request = VM_PROT_NONE;
-       } else {
-               /*
-                * Cannot clean in place, so rip the old page out of the
-                * object, and stick the holding page in. Set new_m to the
-                * page in the new object.
-                */
-               vm_page_lock_queues();
-               VM_PAGE_QUEUES_REMOVE(m);
-               vm_page_remove(m);
-
-               /*
-                * If the pageout daemon put this page in limbo, exchange the
-                * identities of the limbo page and the new fictitious page,
-                * and continue with the new page, unless the prep count has
-                * gone to zero in the meantime (which means no one is
-                * interested in the page any more).  In that case, just clear
-                * the limbo bit and free the extra fictitious page.
-                */
-               if (m->limbo) {
-                       if (m->prep_pin_count == 0) {
-                               /* page doesn't have to be in limbo any more */
-                               m->limbo = FALSE;
-                               vm_page_free(new_page);
-                               vm_pageout_setup_unprepped++;
-                       } else {
-                               vm_page_limbo_exchange(m, new_page);
-                               vm_pageout_setup_limbo++;
-                               vm_page_release_limbo(m);
-                               m = new_page;
-                       }
-               }
-               
-               vm_page_insert(holding_page, old_object, offset);
-               vm_page_unlock_queues();
-
-               m->dirty = TRUE;
-               m->precious = FALSE;
-               new_m = m;
-               new_m->page_lock = VM_PROT_NONE;
-               new_m->unlock_request = VM_PROT_NONE;
-
-               if (old_object->internal)
-                       need_to_wire = TRUE;
-       }
-       /*
-        *      Record that this page has been written out
-        */
-#if    MACH_PAGEMAP
-       vm_external_state_set(old_object->existence_map, offset);
-#endif /* MACH_PAGEMAP */
-
-       vm_object_unlock(old_object);
-
-       vm_object_lock(new_object);
-
-       /*
-        *      Put the page into the new object. If it is a not wired
-        *      (if it's the real page) it will be activated.
-        */
-
-       vm_page_lock_queues();
-       vm_page_insert(new_m, new_object, new_offset);
-       if (need_to_wire)
-               vm_page_wire(new_m);
-       else
-               vm_page_activate(new_m);
-       PAGE_WAKEUP_DONE(new_m);
-       vm_page_unlock_queues();
-
-       vm_object_unlock(new_object);
-
-       /*
-        *      Return the placeholder page to simplify cleanup.
-        */
-       return (holding_page);
-}
-
 /*
  * Routine:    vm_pageclean_setup
  *
@@ -721,9 +601,9 @@ vm_pageout_setup(
  *             necessarily flushed from the VM page cache.
  *             This is accomplished by cleaning in place.
  *
- *             The page must not be busy, and the object and page
- *             queues must be locked.
- *             
+ *             The page must not be busy, and new_object
+ *             must be locked.
+ *
  */
 void
 vm_pageclean_setup(
@@ -732,24 +612,17 @@ vm_pageclean_setup(
        vm_object_t             new_object,
        vm_object_offset_t      new_offset)
 {
-       vm_object_t old_object = m->object;
        assert(!m->busy);
+#if 0
        assert(!m->cleaning);
+#endif
 
        XPR(XPR_VM_PAGEOUT,
     "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
-               (integer_t)old_object, m->offset, (integer_t)m, 
-               (integer_t)new_m, new_offset);
-
-       pmap_clear_modify(m->phys_addr);
-       vm_object_paging_begin(old_object);
+               m->object, m->offset, m, 
+               new_m, new_offset);
 
-       /*
-        *      Record that this page has been written out
-        */
-#if    MACH_PAGEMAP
-       vm_external_state_set(old_object->existence_map, m->offset);
-#endif /*MACH_PAGEMAP*/
+       pmap_clear_modify(m->phys_page);
 
        /*
         * Mark original page as cleaning in place.
@@ -763,64 +636,21 @@ vm_pageclean_setup(
         * the real page.
         */
        assert(new_m->fictitious);
+       assert(new_m->phys_page == vm_page_fictitious_addr);
        new_m->fictitious = FALSE;
        new_m->private = TRUE;
        new_m->pageout = TRUE;
-       new_m->phys_addr = m->phys_addr;
+       new_m->phys_page = m->phys_page;
+
+       vm_page_lockspin_queues();
        vm_page_wire(new_m);
+       vm_page_unlock_queues();
 
        vm_page_insert(new_m, new_object, new_offset);
        assert(!new_m->wanted);
        new_m->busy = FALSE;
 }
 
-void
-vm_pageclean_copy(
-       vm_page_t               m,
-       vm_page_t               new_m,
-       vm_object_t             new_object,
-       vm_object_offset_t      new_offset)
-{
-       XPR(XPR_VM_PAGEOUT,
-       "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
-               m, new_m, new_object, new_offset, 0);
-
-       assert((!m->busy) && (!m->cleaning));
-
-       assert(!new_m->private && !new_m->fictitious);
-
-       pmap_clear_modify(m->phys_addr);
-
-       m->busy = TRUE;
-       vm_object_paging_begin(m->object);
-       vm_page_unlock_queues();
-       vm_object_unlock(m->object);
-
-       /*
-        * Copy the original page to the new page.
-        */
-       vm_page_copy(m, new_m);
-
-       /*
-        * Mark the old page as clean. A request to pmap_is_modified
-        * will get the right answer.
-        */
-       vm_object_lock(m->object);
-       m->dirty = FALSE;
-
-       vm_object_paging_end(m->object);
-
-       vm_page_lock_queues();
-       if (!m->active && !m->inactive)
-               vm_page_activate(m);
-       PAGE_WAKEUP_DONE(m);
-
-       vm_page_insert(new_m, new_object, new_offset);
-       vm_page_activate(new_m);
-       new_m->busy = FALSE;    /* No other thread can be waiting */
-}
-
-
 /*
  *     Routine:        vm_pageout_initialize_page
  *     Purpose:
@@ -843,16 +673,14 @@ void
 vm_pageout_initialize_page(
        vm_page_t       m)
 {
-       vm_map_copy_t           copy;
-       vm_object_t             new_object;
        vm_object_t             object;
        vm_object_offset_t      paging_offset;
        vm_page_t               holding_page;
-
+       memory_object_t         pager;
 
        XPR(XPR_VM_PAGEOUT,
                "vm_pageout_initialize_page, page 0x%X\n",
-               (integer_t)m, 0, 0, 0, 0);
+               m, 0, 0, 0, 0);
        assert(m->busy);
 
        /*
@@ -867,33 +695,45 @@ vm_pageout_initialize_page(
         */
        object = m->object;
        paging_offset = m->offset + object->paging_offset;
-       vm_object_paging_begin(object);
-       vm_object_unlock(object);
-       if (m->absent || m->error || m->restart ||
-           (!m->dirty && !m->precious)) {
+
+       if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
                VM_PAGE_FREE(m);
                panic("reservation without pageout?"); /* alan */
+               vm_object_unlock(object);
+
+               return;
+       }
+
+       /*
+        * If there's no pager, then we can't clean the page.  This should 
+        * never happen since this should be a copy object and therefore not
+        * an external object, so the pager should always be there.
+        */
+
+       pager = object->pager;
+
+       if (pager == MEMORY_OBJECT_NULL) {
+               VM_PAGE_FREE(m);
+               panic("missing pager for copy object");
                return;
        }
 
        /* set the page for future call to vm_fault_list_request */
+       vm_object_paging_begin(object);
        holding_page = NULL;
-       vm_object_lock(m->object);
-       vm_page_lock_queues();
-       pmap_clear_modify(m->phys_addr);
+
+       pmap_clear_modify(m->phys_page);
        m->dirty = TRUE;
-               m->busy = TRUE;
-               m->list_req_pending = TRUE;
-               m->cleaning = TRUE;
+       m->busy = TRUE;
+       m->list_req_pending = TRUE;
+       m->cleaning = TRUE;
        m->pageout = TRUE;
+
+       vm_page_lockspin_queues();
        vm_page_wire(m);
        vm_page_unlock_queues();
-       vm_object_unlock(m->object);
-       vm_pageout_throttle(m);
-       copy = NULL;
 
-       VM_STAT(pageouts++);
-       /* VM_STAT(pages_pagedout++); */
+       vm_object_unlock(object);
 
        /*
         *      Write the data to its pager.
@@ -903,13 +743,10 @@ vm_pageout_initialize_page(
         *      [The object reference from its allocation is donated
         *      to the eventual recipient.]
         */
-       memory_object_data_initialize(object->pager,
-                                       object->pager_request,
-                                       paging_offset,
-                                       POINTER_T(copy),
-                                       PAGE_SIZE);
+       memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
 
        vm_object_lock(object);
+       vm_object_paging_end(object);
 }
 
 #if    MACH_CLUSTER_STATS
@@ -921,484 +758,369 @@ struct {
 } cluster_stats[MAXCLUSTERPAGES];
 #endif /* MACH_CLUSTER_STATS */
 
-boolean_t allow_clustered_pageouts = FALSE;
 
 /*
  * vm_pageout_cluster:
  *
- * Given a page, page it out, and attempt to clean adjacent pages
+ * Given a page, queue it to the appropriate I/O thread,
+ * which will page it out and attempt to clean adjacent pages
  * in the same operation.
  *
- * The page must be busy, and the object unlocked w/ paging reference
- * to prevent deallocation or collapse. The page must not be on any
- * pageout queue.
+ * The page must be busy, and the object and queues locked. We will take a
+ * paging reference to prevent deallocation or collapse when we
+ * release the object lock back at the call site.  The I/O thread
+ * is responsible for consuming this reference
+ *
+ * The page must not be on any pageout queue.
  */
+
 void
-vm_pageout_cluster(
-       vm_page_t m)
+vm_pageout_cluster(vm_page_t m)
 {
        vm_object_t     object = m->object;
-       vm_object_offset_t offset = m->offset;  /* from vm_object start */
-       vm_object_offset_t paging_offset = m->offset + object->paging_offset;
-       vm_object_t     new_object;
-       vm_object_offset_t new_offset;
-       vm_size_t       cluster_size;
-       vm_object_offset_t cluster_offset;      /* from memory_object start */
-       vm_object_offset_t cluster_lower_bound; /* from vm_object_start */
-       vm_object_offset_t cluster_upper_bound; /* from vm_object_start */
-       vm_object_offset_t cluster_start, cluster_end;/* from vm_object start */
-       vm_object_offset_t offset_within_cluster;
-       vm_size_t       length_of_data;
-       vm_page_t       friend, holding_page;
-       vm_map_copy_t   copy;
-       kern_return_t   rc;
-       boolean_t       precious_clean = TRUE;
-       int             pages_in_cluster;
-
-       CLUSTER_STAT(int pages_at_higher_offsets = 0;)
-       CLUSTER_STAT(int pages_at_lower_offsets = 0;)
+        struct         vm_pageout_queue *q;
+
 
        XPR(XPR_VM_PAGEOUT,
                "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
-               (integer_t)object, offset, (integer_t)m, 0, 0);
+               object, m->offset, m, 0, 0);
+
+       VM_PAGE_CHECK(m);
 
-       CLUSTER_STAT(vm_pageout_cluster_clusters++;)
        /*
         * Only a certain kind of page is appreciated here.
         */
-       assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0));
+       assert(m->busy && (m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
        assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);
+       assert(!m->throttled);
 
-       vm_object_lock(object);
-       cluster_size = object->cluster_size;
-
-       assert(cluster_size >= PAGE_SIZE);
-       if (cluster_size < PAGE_SIZE) cluster_size = PAGE_SIZE;
-       assert(object->pager_created && object->pager_initialized);
-       assert(object->internal || object->pager_ready);
+       /*
+        * protect the object from collapse - 
+        * locking in the object's paging_offset.
+        */
+       vm_object_paging_begin(object);
 
-       if (m->precious && !m->dirty)
-               precious_clean = TRUE;
+       /*
+        * set the page for future call to vm_fault_list_request
+        * page should already be marked busy
+        */
+       vm_page_wire(m);
+       m->list_req_pending = TRUE;
+       m->cleaning = TRUE;
+       m->pageout = TRUE;
 
-       if (!object->pager_trusted || !allow_clustered_pageouts)
-               cluster_size = PAGE_SIZE;
-       vm_object_unlock(object);
+       if (object->internal == TRUE)
+               q = &vm_pageout_queue_internal;
+       else
+               q = &vm_pageout_queue_external;
 
-       cluster_offset = paging_offset & (vm_object_offset_t)(cluster_size - 1);
-                       /* bytes from beginning of cluster */
-       /* 
-        * Due to unaligned mappings, we have to be careful
-        * of negative offsets into the VM object. Clip the cluster 
-        * boundary to the VM object, not the memory object.
+        /* 
+        * pgo_laundry count is tied to the laundry bit
         */
-       if (offset > cluster_offset) {
-               cluster_lower_bound = offset - cluster_offset;
-                                               /* from vm_object */
-       } else {
-               cluster_lower_bound = 0;
+        m->laundry = TRUE;
+       q->pgo_laundry++;
+
+       m->pageout_queue = TRUE;
+       queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
+       
+       if (q->pgo_idle == TRUE) {
+               q->pgo_idle = FALSE;
+               thread_wakeup((event_t) &q->pgo_pending);
        }
-       cluster_upper_bound = (offset - cluster_offset) + 
-                               (vm_object_offset_t)cluster_size;
 
-       /* set the page for future call to vm_fault_list_request */
-       holding_page = NULL;
-       vm_object_lock(m->object);
-       vm_page_lock_queues();
-               m->busy = TRUE;
-               m->list_req_pending = TRUE;
-               m->cleaning = TRUE;
-       m->pageout = TRUE;
-       vm_page_wire(m);
-       vm_page_unlock_queues();
-       vm_object_unlock(m->object);
-       vm_pageout_throttle(m);
+       VM_PAGE_CHECK(m);
+}
 
-       /*
-        * Search backward for adjacent eligible pages to clean in 
-        * this operation.
-        */
 
-       cluster_start = offset;
-       if (offset) {   /* avoid wrap-around at zero */
-           for (cluster_start = offset - PAGE_SIZE_64;
-               cluster_start >= cluster_lower_bound;
-               cluster_start -= PAGE_SIZE_64) {
-               assert(cluster_size > PAGE_SIZE);
+unsigned long vm_pageout_throttle_up_count = 0;
 
-               vm_object_lock(object);
-               vm_page_lock_queues();
+/*
+ * A page is back from laundry or we are stealing it back from 
+ * the laundering state.  See if there are some pages waiting to
+ * go to laundry and if we can let some of them go now.
+ *
+ * Object and page queues must be locked.
+ */
+void
+vm_pageout_throttle_up(
+       vm_page_t       m)
+{
+        struct vm_pageout_queue *q;
 
-               if ((friend = vm_pageout_cluster_page(object, cluster_start,
-                               precious_clean)) == VM_PAGE_NULL) {
-                       vm_page_unlock_queues();
-                       vm_object_unlock(object);
-                       break;
-               }
-               new_offset = (cluster_start + object->paging_offset)
-                               & (cluster_size - 1);
-
-               assert(new_offset < cluster_offset);
-                       m->list_req_pending = TRUE;
-                       m->cleaning = TRUE;
-/* do nothing except advance the write request, all we really need to */
-/* do is push the target page and let the code at the other end decide */
-/* what is really the right size */
-               if (vm_page_free_count <= vm_page_free_reserved) {
-                               m->busy = TRUE;
-                       m->pageout = TRUE;
-                       vm_page_wire(m);
-               }
+       assert(m->object != VM_OBJECT_NULL);
+       assert(m->object != kernel_object);
 
-               vm_page_unlock_queues();
-               vm_object_unlock(object);
-               if(m->dirty || m->object->internal) {
-                       CLUSTER_STAT(pages_at_lower_offsets++;)
-               }
+       vm_pageout_throttle_up_count++;
 
-           }
-           cluster_start += PAGE_SIZE_64;
-       }
-       assert(cluster_start >= cluster_lower_bound);
-       assert(cluster_start <= offset);
-       /*
-        * Search forward for adjacent eligible pages to clean in 
-        * this operation.
-        */
-       for (cluster_end = offset + PAGE_SIZE_64;
-               cluster_end < cluster_upper_bound;
-               cluster_end += PAGE_SIZE_64) {
-               assert(cluster_size > PAGE_SIZE);
+       if (m->object->internal == TRUE)
+               q = &vm_pageout_queue_internal;
+       else
+               q = &vm_pageout_queue_external;
 
-               vm_object_lock(object);
-               vm_page_lock_queues();
+       if (m->pageout_queue == TRUE) {
 
-               if ((friend = vm_pageout_cluster_page(object, cluster_end,
-                               precious_clean)) == VM_PAGE_NULL) {
-                       vm_page_unlock_queues();
-                       vm_object_unlock(object);
-                       break;
-               }
-               new_offset = (cluster_end + object->paging_offset)
-                               & (cluster_size - 1);
-
-               assert(new_offset < cluster_size);
-                       m->list_req_pending = TRUE;
-                       m->cleaning = TRUE;
-/* do nothing except advance the write request, all we really need to */
-/* do is push the target page and let the code at the other end decide */
-/* what is really the right size */
-               if (vm_page_free_count <= vm_page_free_reserved) {
-                               m->busy = TRUE;
-                       m->pageout = TRUE;
-                       vm_page_wire(m);
-               }
+               queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
+               m->pageout_queue = FALSE;
 
-               vm_page_unlock_queues();
-               vm_object_unlock(object);
-               
-               if(m->dirty || m->object->internal) {
-                       CLUSTER_STAT(pages_at_higher_offsets++;)
-               }
-       }
-       assert(cluster_end <= cluster_upper_bound);
-       assert(cluster_end >= offset + PAGE_SIZE);
+               m->pageq.next = NULL;
+               m->pageq.prev = NULL;
 
-       /*
-        * (offset - cluster_offset) is beginning of cluster_object
-        * relative to vm_object start.
-        */
-       offset_within_cluster = cluster_start - (offset - cluster_offset);
-       length_of_data = cluster_end - cluster_start;
+               vm_object_paging_end(m->object);
+       }
+       if (m->laundry == TRUE) {
+               m->laundry = FALSE;
+               q->pgo_laundry--;
 
-       assert(offset_within_cluster < cluster_size);
-       assert((offset_within_cluster + length_of_data) <= cluster_size);
+               if (q->pgo_throttled == TRUE) {
+                       q->pgo_throttled = FALSE;
+                       thread_wakeup((event_t) &q->pgo_laundry);
+               }
+               if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
+                       q->pgo_draining = FALSE;
+                       thread_wakeup((event_t) (&q->pgo_laundry+1));
+               }
+       }
+}
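
Note the two distinct wakeup channels above: &q->pgo_laundry for throttled queuers and &q->pgo_laundry + 1 for a drain waiter. A rough user-space analogy of that handshake, with invented names and condition variables standing in for Mach event addresses:

#include <pthread.h>

struct pageout_queue_model {
	pthread_mutex_t lock;
	unsigned	laundry;	/* pages currently out for laundering */
	pthread_cond_t	throttled;	/* analogue of  &q->pgo_laundry     */
	pthread_cond_t	drained;	/* analogue of  &q->pgo_laundry + 1 */
};

/* A page came back from laundry: drop the count and wake waiters,
 * loosely mirroring what vm_pageout_throttle_up() does above. */
void laundry_done(struct pageout_queue_model *q)
{
	pthread_mutex_lock(&q->lock);
	if (q->laundry > 0)
		q->laundry--;
	pthread_cond_signal(&q->throttled);	/* one queuer may proceed */
	if (q->laundry == 0)
		pthread_cond_broadcast(&q->drained);	/* drain completed */
	pthread_mutex_unlock(&q->lock);
}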
 
-       rc = KERN_SUCCESS;
-       assert(rc == KERN_SUCCESS);
 
-       pages_in_cluster = length_of_data/PAGE_SIZE;
-       if(m->dirty || m->object->internal) {
-               VM_STAT(pageouts++);
-       }
-       /* VM_STAT(pages_pagedout += pages_in_cluster); */
+/*
+ *     vm_pageout_scan does the dirty work for the pageout daemon.
+ *     It returns with vm_page_queue_free_lock held and
+ *     vm_page_free_wanted == 0.
+ */
 
-#if    MACH_CLUSTER_STATS
-       (cluster_stats[pages_at_lower_offsets].pages_at_lower_offsets)++;
-       (cluster_stats[pages_at_higher_offsets].pages_at_higher_offsets)++;
-       (cluster_stats[pages_in_cluster].pages_in_cluster)++;
-#endif /* MACH_CLUSTER_STATS */
+#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT  (3 * MAX_UPL_TRANSFER)
 
-       /*
-        * Send the data to the pager.
-        */
-       paging_offset = cluster_start + object->paging_offset;
-#ifdef MACH_BSD
-       if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) ==
-       ((rpc_subsystem_t) &vnode_pager_workaround)) {
-        rc = vnode_pager_data_return(object->pager,
-                                      object->pager_request,
-                                      paging_offset,
-                                      POINTER_T(copy),
-                                      length_of_data,
-                                      !precious_clean,
-                                      FALSE);
-       } else {
-                       rc = memory_object_data_return(object->pager,
-                                      object->pager_request,
-                                      paging_offset,
-                                      POINTER_T(copy),
-                                      length_of_data,
-                                      !precious_clean,
-                                      FALSE);
-       }
-#else
-       rc = memory_object_data_return(object->pager,
-                                      object->pager_request,
-                                      paging_offset,
-                                      POINTER_T(copy),
-                                      length_of_data,
-                                      !precious_clean,
-                                      FALSE);
-#endif
-       vm_object_lock(object);
-       vm_object_paging_end(object);
+#define        FCS_IDLE                0
+#define FCS_DELAYED            1
+#define FCS_DEADLOCK_DETECTED  2
 
-       if (holding_page) {
-               assert(!object->pager_trusted);
-               VM_PAGE_FREE(holding_page);
-               vm_object_paging_end(object);
-       }
+struct flow_control {
+        int            state;
+        mach_timespec_t        ts;
+};
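
vm_pageout_scan() itself is not shown in this hunk; the following is only a speculative sketch of how a three-state flow control like the one declared above typically advances, using the VM_PAGEOUT_DEADLOCK_WAIT value defined earlier (all names here are invented):

/* Illustrative only -- not the elided vm_pageout_scan() logic. */
enum { IDLE, DELAYED, DEADLOCK_DETECTED };

struct flow_ctl { int state; long deadline_msecs; };

/* Called each time the pageout queue is found throttled; returns 1
 * when the caller should move VM_PAGEOUT_DEADLOCK_RELIEF pages. */
int flow_advance(struct flow_ctl *fc, long now_msecs)
{
	switch (fc->state) {
	case IDLE:			/* first stall: arm a deadline */
		fc->state = DELAYED;
		fc->deadline_msecs = now_msecs + 300;	/* DEADLOCK_WAIT ms */
		return 0;
	case DELAYED:			/* still stalled past the deadline? */
		if (now_msecs >= fc->deadline_msecs) {
			fc->state = DEADLOCK_DETECTED;
			return 1;
		}
		return 0;
	default:			/* relief issued; start over */
		fc->state = IDLE;
		return 0;
	}
}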
 
-       vm_object_unlock(object);
-}
 
 /*
- *     vm_pageout_return_write_pages
- *     Recover pages from an aborted write attempt
+ * VM memory pressure monitoring.
+ *
+ * vm_pageout_scan() keeps track of the number of pages it considers and
+ * reclaims, in the currently active vm_pageout_stats[vm_pageout_stat_now].
+ *
+ * compute_memory_pressure() is called every second from compute_averages()
+ * and moves "vm_pageout_stat_now" forward, to start accumulating the number
+ * of reclaimed pages in a new vm_pageout_stats[] bucket.
+ *
+ * mach_vm_pressure_monitor() collects past statistics about memory pressure.
+ * The caller provides the number of seconds ("nsecs") worth of statistics
+ * it wants, up to 30 seconds.
+ * It computes the number of pages reclaimed in the past "nsecs" seconds and
+ * also returns the number of pages the system still needs to reclaim at this
+ * moment in time.
  */
+#define VM_PAGEOUT_STAT_SIZE   31
+struct vm_pageout_stat {
+       unsigned int considered;
+       unsigned int reclaimed;
+} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
+unsigned int vm_pageout_stat_now = 0;
+unsigned int vm_memory_pressure = 0;
+
+#define VM_PAGEOUT_STAT_BEFORE(i) \
+       (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
+#define VM_PAGEOUT_STAT_AFTER(i) \
+       (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
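
Given those macros, summing a trailing window over the ring is straightforward. A stand-alone sketch of the computation the comment above attributes to mach_vm_pressure_monitor() (names invented, logic illustrative):

#define STAT_SIZE 31
#define STAT_BEFORE(i) (((i) == 0) ? STAT_SIZE - 1 : (i) - 1)

struct stat_bucket { unsigned considered, reclaimed; };

/* Sum the "reclaimed" counts for the last nsecs completed seconds. */
unsigned reclaimed_in_last(const struct stat_bucket stats[STAT_SIZE],
			   unsigned now, unsigned nsecs)
{
	unsigned total = 0;

	if (nsecs > STAT_SIZE - 1)	/* at most 30 seconds of history */
		nsecs = STAT_SIZE - 1;
	for (unsigned i = STAT_BEFORE(now); nsecs-- > 0; i = STAT_BEFORE(i))
		total += stats[i].reclaimed;
	return total;
}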
 
-vm_pageout_return_write_pages(
-       ipc_port_t              control_port,  
-       vm_object_offset_t      object_offset, 
-       vm_map_copy_t           copy)
+/*
+ * Called from compute_averages().
+ */
+void
+compute_memory_pressure(
+       __unused void *arg)
 {
-       vm_object_t     object;
-       int             offset;
-       int             size;
-       int             shadow_offset;
-       int             copy_offset;
-       int             j;
-       vm_page_t       m;
+       unsigned int vm_pageout_next;
+
+       vm_memory_pressure =
+               vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
+
+       commpage_set_memory_pressure( vm_memory_pressure );
 
+       /* move "now" forward */
+       vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
+       vm_pageout_stats[vm_pageout_next].considered = 0;
+       vm_pageout_stats[vm_pageout_next].reclaimed = 0;
+       vm_pageout_stat_now = vm_pageout_next;
+}
 
-       object = copy->cpy_object;
-       copy_offset = copy->offset;
-       size = copy->size;
+unsigned int
+mach_vm_ctl_page_free_wanted(void)
+{
+       unsigned int page_free_target, page_free_count, page_free_wanted;
 
-       if((copy->type != VM_MAP_COPY_OBJECT) || (object->shadow == 0)) {
-               object = (vm_object_t)control_port->ip_kobject;
-               shadow_offset = (object_offset - object->paging_offset)
-                                                                - copy->offset;
+       page_free_target = vm_page_free_target;
+       page_free_count = vm_page_free_count;
+       if (page_free_target > page_free_count) {
+               page_free_wanted = page_free_target - page_free_count;
        } else {
-               /* get the offset from the copy object */
-               shadow_offset = object->shadow_offset;
-               /* find the backing object */
-               object = object->shadow;
+               page_free_wanted = 0;
        }
-       vm_object_lock(object);
 
-       for(offset = 0, j=0; offset < size; offset+=page_size, j++) {
-               m = vm_page_lookup(object, 
-                               offset + shadow_offset + copy_offset);
-               if((m == VM_PAGE_NULL) || m->fictitious) {
+       return page_free_wanted;
+}
 
-                       vm_page_t       p;
-                       int             i;
-                       vm_object_t     copy_object;
+kern_return_t
+mach_vm_pressure_monitor(
+       boolean_t       wait_for_pressure,
+       unsigned int    nsecs_monitored,
+       unsigned int    *pages_reclaimed_p,
+       unsigned int    *pages_wanted_p)
+{
+       wait_result_t   wr;
+       unsigned int    vm_pageout_then, vm_pageout_now;
+       unsigned int    pages_reclaimed;
 
-                       /* m might be fictitious if the original page */
-                       /* was found to be in limbo at the time of    */
-                       /* vm_pageout_setup                           */
+       /*
+        * We don't take the vm_page_queue_lock here because we don't want
+        * mach_vm_pressure_monitor() to get in the way of the vm_pageout_scan()
+        * thread when it's trying to reclaim memory.  We don't need fully
+        * accurate monitoring anyway...
+        */
 
-                       if((m != VM_PAGE_NULL) && m->fictitious) {
-                               m->cleaning = FALSE;
-                               vm_page_remove(m);
-                               /* if object is not pager trusted then       */
-                               /* this fictitious page will be removed      */
-                               /* as the holding page in vm_pageout_cluster */
-                               if (object->pager_trusted)
-                                       vm_page_free(m);
-                               if(vm_page_laundry_count)
-                                       vm_page_laundry_count--;
-                               if (vm_page_laundry_count 
-                                       < vm_page_laundry_min) {
-                                       vm_page_laundry_min = 0;
-                                       thread_wakeup((event_t) 
-                                               &vm_page_laundry_count);
-                               }
-                       }
-                       else if ((object->pager_trusted) &&
-                                       (copy->type == VM_MAP_COPY_OBJECT)) {
-                               vm_object_paging_end(object);
+       if (wait_for_pressure) {
+               /* wait until there's memory pressure */
+               while (vm_page_free_count >= vm_page_free_target) {
+                       wr = assert_wait((event_t) &vm_page_free_wanted,
+                                        THREAD_INTERRUPTIBLE);
+                       if (wr == THREAD_WAITING) {
+                               wr = thread_block(THREAD_CONTINUE_NULL);
                        }
-
-                       copy_object = copy->cpy_object;
-
-                       if(copy->type == VM_MAP_COPY_OBJECT) {
-                               p = (vm_page_t) queue_first(&copy_object->memq);
-
-                               for(i = 0; 
-                                       i < copy_object->resident_page_count; 
-                                       i++) {
-                                       if(p->offset == (offset + copy_offset))
-                                               break;
-                                       p = (vm_page_t) queue_next(&p->listq);
-                               }
-
-                               vm_page_remove(p);
-                       } else {
-                               p = copy->cpy_page_list[j];
-                               copy->cpy_page_list[j] = 0;
-                               p->gobbled = FALSE;
+                       if (wr == THREAD_INTERRUPTED) {
+                               return KERN_ABORTED;
                        }
-
-                       vm_page_insert(p, object, 
-                               offset + shadow_offset + copy_offset);
-                       p->busy = TRUE;
-                       p->dirty = TRUE;
-                       p->laundry = FALSE;
-                       if (p->pageout) {
-                               p->pageout = FALSE;  /*dont throw away target*/
-                               vm_page_unwire(p);/* reactivates */
+                       if (wr == THREAD_AWAKENED) {
+                               /*
+                                * The memory pressure might have already
+                                * been relieved but let's not block again
+                                * and let's report that there was memory
+                                * pressure at some point.
+                                */
+                               break;
                        }
-               } else if(m->pageout) {
-                       m->pageout = FALSE;  /* dont throw away target pages */
-                       vm_page_unwire(m);/* reactivates */
                }
        }
 
-       vm_object_unlock(object);
-       vm_map_copy_discard(copy);
-       vm_object_lock(object);
-
-       for(offset = 0; offset < size; offset+=page_size) { 
-               m = vm_page_lookup(object,
-                               offset + shadow_offset + copy_offset);
-               m->dirty = TRUE;  /* we'll send the pages home later */
-               m->busy = FALSE;  /* allow system access again */
+       /* provide the number of pages the system wants to reclaim */
+       if (pages_wanted_p != NULL) {
+               *pages_wanted_p = mach_vm_ctl_page_free_wanted();
        }
 
-       vm_object_unlock(object);
-}
-
-/*
- *     Trusted pager throttle.
- *     Object must be unlocked, page queues must be unlocked.
- */
-void
-vm_pageout_throttle(
-       register vm_page_t m)
-{
-       vm_page_lock_queues();
-       assert(!m->laundry);
-       m->laundry = TRUE;
-       while (vm_page_laundry_count >= vm_page_laundry_max) {
-               /*
-                * Set the threshold for when vm_page_free()
-                * should wake us up.
-                */
-               vm_page_laundry_min = vm_page_laundry_max/2;
-               assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT);
-               vm_page_unlock_queues();
-
-               /*
-                * Pause to let the default pager catch up.
-                */
-               thread_block((void (*)(void)) 0);
-               vm_page_lock_queues();
+       if (pages_reclaimed_p == NULL) {
+               return KERN_SUCCESS;
        }
-       vm_page_laundry_count++;
-       vm_page_unlock_queues();
-}
-
-/*
- * The global variable vm_pageout_clean_active_pages controls whether
- * active pages are considered valid to be cleaned in place during a
- * clustered pageout. Performance measurements are necessary to determine
- * the best policy.
- */
-int vm_pageout_clean_active_pages = 1;
-/*
- * vm_pageout_cluster_page: [Internal]
- *
- * return a vm_page_t to the page at (object,offset) if it is appropriate
- * to clean in place. Pages that are non-existent, busy, absent, already
- * cleaning, or not dirty are not eligible to be cleaned as an adjacent
- * page in a cluster.
- *
- * The object must be locked on entry, and remains locked throughout
- * this call.
- */
 
-vm_page_t
-vm_pageout_cluster_page(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       boolean_t               precious_clean)
-{
-       vm_page_t m;
+       /* provide number of pages reclaimed in the last "nsecs_monitored" */
+       do {
+               vm_pageout_now = vm_pageout_stat_now;
+               pages_reclaimed = 0;
+               for (vm_pageout_then =
+                            VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
+                    vm_pageout_then != vm_pageout_now &&
+                            nsecs_monitored-- != 0;
+                    vm_pageout_then =
+                            VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
+                       pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
+               }
+       } while (vm_pageout_now != vm_pageout_stat_now);
+       *pages_reclaimed_p = pages_reclaimed;
 
-       XPR(XPR_VM_PAGEOUT,
-               "vm_pageout_cluster_page, object 0x%X offset 0x%X\n",
-               (integer_t)object, offset, 0, 0, 0);
+       return KERN_SUCCESS;
+}
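+
+/*
+ * Illustrative sketch (editor's addition, not compiled; the function
+ * name and the 10-second window are arbitrary): a caller that blocks
+ * until there is memory pressure, then reports the last 10 seconds
+ * of reclaim activity.
+ */
+#if 0
+static void
+vm_pressure_poll_example(void)
+{
+       kern_return_t   kr;
+       unsigned int    reclaimed, wanted;
+
+       kr = mach_vm_pressure_monitor(TRUE, 10, &reclaimed, &wanted);
+       if (kr == KERN_SUCCESS) {
+               printf("pressure: reclaimed %u pages, %u still wanted\n",
+                      reclaimed, wanted);
+       }
+}
+#endif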
 
-       if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
-               return(VM_PAGE_NULL);
+/* Page States: Used below to maintain the page state
+   before it's removed from its queue. This saved state
+   helps us do the right accounting in certain cases.
+*/
 
-       if (m->busy || m->absent || m->cleaning || 
-           m->prep_pin_count != 0 ||
-           (m->wire_count != 0) || m->error)
-               return(VM_PAGE_NULL);
+#define PAGE_STATE_SPECULATIVE 1
+#define PAGE_STATE_THROTTLED   2
+#define PAGE_STATE_ZEROFILL    3
+#define PAGE_STATE_INACTIVE    4
+
+#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m)                                \
+       MACRO_BEGIN                                                     \
+       /*                                                              \
+        * If a "reusable" page somehow made it back into               \
+        * the active queue, it's been re-used and is not               \
+        * quite re-usable.                                             \
+        * If the VM object was "all_reusable", consider it             \
+        * as "all re-used" instead of converting it to                 \
+        * "partially re-used", which could be expensive.               \
+        */                                                             \
+       if ((m)->reusable ||                                            \
+           (m)->object->all_reusable) {                                \
+               vm_object_reuse_pages((m)->object,                      \
+                                     (m)->offset,                      \
+                                     (m)->offset + PAGE_SIZE_64,       \
+                                     FALSE);                           \
+       }                                                               \
+       MACRO_END
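+
+/*
+ * Editor's note: the macro above is invoked from vm_pageout_scan()
+ * with the page queues lock and (m)->object's lock held, e.g.
+ *
+ *     VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
+ *
+ * vm_object_reuse_pages() then drops the "reusable" accounting for
+ * that single page (or, when the object was "all_reusable", for the
+ * whole object) so it is charged as re-used rather than as cheaply
+ * reclaimable.
+ */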
 
-       if (vm_pageout_clean_active_pages) {
-               if (!m->active && !m->inactive) return(VM_PAGE_NULL);
-       } else {
-               if (!m->inactive) return(VM_PAGE_NULL);
-       }
+void
+vm_pageout_scan(void)
+{
+       unsigned int loop_count = 0;
+       unsigned int inactive_burst_count = 0;
+       unsigned int active_burst_count = 0;
+       unsigned int reactivated_this_call;
+       unsigned int reactivate_limit;
+       vm_page_t   local_freeq = NULL;
+       int         local_freed = 0;
+       int         delayed_unlock;
+       int         refmod_state = 0;
+        int    vm_pageout_deadlock_target = 0;
+       struct  vm_pageout_queue *iq;
+       struct  vm_pageout_queue *eq;
+        struct vm_speculative_age_q *sq;
+       struct  flow_control    flow_control = { 0, { 0, 0 } };
+        boolean_t inactive_throttled = FALSE;
+       boolean_t try_failed;
+       mach_timespec_t         ts;
+       unsigned int msecs = 0;
+       vm_object_t     object;
+       vm_object_t     last_object_tried;
+#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
+       unsigned int    zf_ratio;
+       unsigned int    zf_run_count;
+#else
+       uint64_t        zf_ratio;
+       uint64_t        zf_run_count;
+#endif
+       uint32_t        catch_up_count = 0;
+       uint32_t        inactive_reclaim_run;
+       boolean_t       forced_reclaim;
+       int             page_prev_state = 0;
 
-       assert(!m->private);
-       assert(!m->fictitious);
+       flow_control.state = FCS_IDLE;
+       iq = &vm_pageout_queue_internal;
+       eq = &vm_pageout_queue_external;
+       sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
 
-       if (!m->dirty) m->dirty = pmap_is_modified(m->phys_addr);
 
-       if (precious_clean) {
-               if (!m->precious || !m->dirty)
-                       return(VM_PAGE_NULL);
-       } else {
-               if (!m->dirty)
-                       return(VM_PAGE_NULL);
-       }
-       return(m);
-}
+        XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
 
-/*
- *     vm_pageout_scan does the dirty work for the pageout daemon.
- *     It returns with vm_page_queue_free_lock held and
- *     vm_page_free_wanted == 0.
- */
-extern void vm_pageout_scan_continue(void);    /* forward; */
+        
+       vm_page_lock_queues();
+       delayed_unlock = 1;     /* must be nonzero if Qs are locked, 0 if unlocked */
 
-void
-vm_pageout_scan(void)
-{
-       unsigned int burst_count;
-       boolean_t now = FALSE;
-       unsigned int laundry_pages;
-       boolean_t need_more_inactive_pages;
-       unsigned int    loop_detect;
+       /*
+        *      Calculate the max number of referenced pages on the inactive
+        *      queue that we will reactivate.
+        */
+       reactivated_this_call = 0;
+       reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
+                                                   vm_page_inactive_count);
+       inactive_reclaim_run = 0;
 
-        XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
 
 /*???*/        /*
         *      We want to gradually dribble pages from the active queue
@@ -1423,144 +1145,170 @@ vm_pageout_scan(void)
         *      When memory is very tight, we can't rely on external pagers to
         *      clean pages.  They probably aren't running, because they
         *      aren't vm-privileged.  If we kept sending dirty pages to them,
-        *      we could exhaust the free list.  However, we can't just ignore
-        *      pages belonging to external objects, because there might be no
-        *      pages belonging to internal objects.  Hence, we get the page
-        *      into an internal object and then immediately double-page it,
-        *      sending it to the default pager.
-        *
-        *      consider_zone_gc should be last, because the other operations
-        *      might return memory to zones.
+        *      we could exhaust the free list.
         */
 
-    Restart:
-
-       mutex_lock(&vm_page_queue_free_lock);
-       now = (vm_page_free_count < vm_page_free_min);
-       mutex_unlock(&vm_page_queue_free_lock);
-#if    THREAD_SWAPPER
-       swapout_threads(now);
-#endif /* THREAD_SWAPPER */
 
-       stack_collect();
-       consider_task_collect();
-       consider_thread_collect();
-       cleanup_limbo_queue();
-       consider_zone_gc();
-       consider_machine_collect();
-
-       loop_detect = vm_page_active_count + vm_page_inactive_count;
-#if 0
-       if (vm_page_free_count <= vm_page_free_reserved) {
-               need_more_inactive_pages = TRUE;
-       } else {
-               need_more_inactive_pages = FALSE;
-       }
+Restart:
+       assert(delayed_unlock != 0);
+       
+       /*
+        *      A page is "zero-filled" if it was not paged in from somewhere,
+        *      and it belongs to an object at least VM_ZF_OBJECT_SIZE_THRESHOLD big.
+        *      Recalculate the zero-filled page ratio.  We use this to apportion
+        *      victimized pages between the normal and zero-filled inactive
+        *      queues according to their relative abundance in memory.  Thus if a task
+        *      is flooding memory with zf pages, we begin to hunt them down.
+        *      It would be better to throttle greedy tasks at a higher level,
+        *      but at the moment mach vm cannot do this.
+        */
+       {
+#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
+               uint32_t  total  = vm_page_active_count + vm_page_inactive_count;
+               uint32_t  normal = total - vm_zf_count;
 #else
-       need_more_inactive_pages = FALSE;
+               uint64_t  total  = vm_page_active_count + vm_page_inactive_count;
+               uint64_t  normal = total - vm_zf_count;
 #endif
 
-       for (burst_count = 0;;) {
-               register vm_page_t m;
-               register vm_object_t object;
-               unsigned int free_count;
+               /* zf_ratio is the number of zf pages we victimize per normal page */
+               
+               if (vm_zf_count < vm_accellerate_zf_pageout_trigger)
+                       zf_ratio = 0;
+               else if ((vm_zf_count <= normal) || (normal == 0))
+                       zf_ratio = 1;
+               else 
+                       zf_ratio = vm_zf_count / normal;
+                       
+               zf_run_count = 0;
+       }
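+       /*
+        * Worked example (editor's addition, numbers hypothetical):
+        * with vm_zf_count = 30000 and normal = 10000, zf_ratio = 3,
+        * so the victim-selection loop below takes up to three
+        * zero-filled pages (zf_run_count 0..2) for every normal
+        * inactive page, at which point zf_run_count resets to 0.
+        */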
+        
+       /*
+        *      Recalculate vm_page_inactive_target.
+        */
+       vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+                                                         vm_page_inactive_count +
+                                                         vm_page_speculative_count);
+       /*
+        * don't want to wake the pageout_scan thread up every time we fall below
+        * the targets... set a low water mark at 0.25% below the target
+        */
+       vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
+
+       vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
+                                                               vm_page_inactive_count);
+       object = NULL;
+       last_object_tried = NULL;
+       try_failed = FALSE;
+       
+       if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
+               catch_up_count = vm_page_inactive_count + vm_page_speculative_count;
+       else
+               catch_up_count = 0;
+                   
+       for (;;) {
+               vm_page_t m;
+
+               DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
+
+               if (delayed_unlock == 0) {
+                       vm_page_lock_queues();
+                       delayed_unlock = 1;
+               }
 
                /*
-                *      Recalculate vm_page_inactivate_target.
+                *      Don't sweep through active queue more than the throttle
+                *      which should be kept relatively low
                 */
-
-               vm_page_lock_queues();
-               vm_page_inactive_target =
-                       VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
-                                               vm_page_inactive_count);
+               active_burst_count = MIN(vm_pageout_burst_active_throttle,
+                                        vm_page_active_count);
 
                /*
                 *      Move pages from active to inactive.
                 */
+               if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
+                       goto done_moving_active_pages;
 
-               while ((vm_page_inactive_count < vm_page_inactive_target ||
-                       need_more_inactive_pages) &&
-                      !queue_empty(&vm_page_queue_active)) {
-                       register vm_object_t object;
+               while (!queue_empty(&vm_page_queue_active) && active_burst_count) {
+
+                       if (active_burst_count)
+                              active_burst_count--;
 
                        vm_pageout_active++;
+
                        m = (vm_page_t) queue_first(&vm_page_queue_active);
 
+                       assert(m->active && !m->inactive);
+                       assert(!m->laundry);
+                       assert(m->object != kernel_object);
+                       assert(m->phys_page != vm_page_guard_addr);
+
+                       DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
                        /*
-                        * If we're getting really low on memory,
-                        * try selecting a page that will go 
-                        * directly to the default_pager.
-                        * If there are no such pages, we have to
-                        * page out a page backed by an EMM,
-                        * so that the default_pager can recover
-                        * it eventually.
+                        * Try to lock object; since we've already got the
+                        * page queues lock, we can only 'try' for this one.
+                        * if the 'try' fails, we need to do a mutex_pause
+                        * to allow the owner of the object lock a chance to
+                        * run... otherwise, we're likely to trip over this
+                        * object in the same state as we work our way through
+                        * the queue... clumps of pages associated with the same
+                        * object are fairly typical on the inactive and active queues
                         */
-                       if (need_more_inactive_pages && 
-                               (IP_VALID(memory_manager_default))) {
-                               vm_pageout_scan_active_emm_throttle++;
-                               do {
-                                       assert(m->active && !m->inactive);
-                                       object = m->object;
-
-                                       if (vm_object_lock_try(object)) {
-#if 0
-                                               if (object->pager_trusted ||
-                                                   object->internal) {
-                                                       /* found one ! */
-                                                       vm_pageout_scan_active_emm_throttle_success++;
-                                                       goto object_locked_active;
-                                               }
-#else
-                                       vm_pageout_scan_active_emm_throttle_success++;
-                                                       goto object_locked_active;
-#endif
-                                               vm_object_unlock(object);
-                                       }
-                                       m = (vm_page_t) queue_next(&m->pageq);
-                               } while (!queue_end(&vm_page_queue_active,
-                                                   (queue_entry_t) m));
-                               if (queue_end(&vm_page_queue_active,
-                                             (queue_entry_t) m)) {
-                                       vm_pageout_scan_active_emm_throttle_failure++;
-                                       m = (vm_page_t)
-                                               queue_first(&vm_page_queue_active);
+                       if (m->object != object) {
+                               if (object != NULL) {
+                                       vm_object_unlock(object);
+                                       object = NULL;
+                                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
                                }
-                       }
-
-                       assert(m->active && !m->inactive);
+                               if (!vm_object_lock_try_scan(m->object)) {
+                                       /*
+                                        * move page to end of active queue and continue
+                                        */
+                                       queue_remove(&vm_page_queue_active, m,
+                                                    vm_page_t, pageq);
+                                       queue_enter(&vm_page_queue_active, m,
+                                                   vm_page_t, pageq);
+
+                                       try_failed = TRUE;
+                                       
+                                       m = (vm_page_t) queue_first(&vm_page_queue_active);
+                                       /*
+                                        * this is the next object we're going to be interested in
+                                        * try to make sure it's available after the lck_mtx_yield
+                                        * returns control
+                                        */
+                                       vm_pageout_scan_wants_object = m->object;
 
-                       object = m->object;
-                       if (!vm_object_lock_try(object)) {
-                               /*
-                                *      Move page to end and continue.
-                                */
+                                       goto done_with_activepage;
+                               }
+                               object = m->object;
 
-                               queue_remove(&vm_page_queue_active, m,
-                                            vm_page_t, pageq);
-                               queue_enter(&vm_page_queue_active, m,
-                                           vm_page_t, pageq);
-                               vm_page_unlock_queues();
-                               mutex_pause();
-                               vm_page_lock_queues();
-                               continue;
+                               try_failed = FALSE;
                        }
 
-                   object_locked_active:
                        /*
-                        *      If the page is busy, then we pull it
-                        *      off the active queue and leave it alone.
+                        * if the page is BUSY, then we pull it
+                        * off the active queue and leave it alone.
+                        * when BUSY is cleared, it will get stuck
+                        * back on the appropriate queue
                         */
-
                        if (m->busy) {
-                               vm_object_unlock(object);
                                queue_remove(&vm_page_queue_active, m,
                                             vm_page_t, pageq);
-                               m->active = FALSE;
+                               m->pageq.next = NULL;
+                               m->pageq.prev = NULL;
+
                                if (!m->fictitious)
                                        vm_page_active_count--;
-                               continue;
+                               m->active = FALSE;
+
+                               goto done_with_activepage;
                        }
 
+                       /* deal with a rogue "reusable" page */
+                       VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
+
                        /*
                         *      Deactivate the page while holding the object
                         *      locked, so we know the page is still not busy.
@@ -1569,191 +1317,605 @@ vm_pageout_scan(void)
                         *      absent or fictitious, but vm_page_deactivate
                         *      can handle that.
                         */
-
                        vm_page_deactivate(m);
-                       vm_object_unlock(object);
-               }
 
-               /*
-                *      We are done if we have met our target *and*
-                *      nobody is still waiting for a page.
-                */
+done_with_activepage:
+                       if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
 
-               mutex_lock(&vm_page_queue_free_lock);
-               free_count = vm_page_free_count;
-               if ((free_count >= vm_page_free_target) &&
-                   (vm_page_free_wanted == 0)) {
-                       vm_page_unlock_queues();
-                       break;
+                               if (object != NULL) {
+                                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+                                       vm_object_unlock(object);
+                                       object = NULL;
+                               }
+                               if (local_freeq) {
+                                       vm_page_unlock_queues();
+                                       vm_page_free_list(local_freeq, TRUE);
+                                       
+                                       local_freeq = NULL;
+                                       local_freed = 0;
+                                       vm_page_lock_queues();
+                               } else
+                                       lck_mtx_yield(&vm_page_queue_lock);
+
+                               delayed_unlock = 1;
+
+                               /*
+                                * continue the while loop processing
+                                * the active queue... need to hold
+                                * the page queues lock
+                                */
+                       }
                }
-               mutex_unlock(&vm_page_queue_free_lock);
+
+
+
+               /**********************************************************************
+                * above this point we're playing with the active queue
+                * below this point we're playing with the throttling mechanisms
+                * and the inactive queue
+                **********************************************************************/
+
+done_moving_active_pages:
 
                /*
-                * Sometimes we have to pause:
-                *      1) No inactive pages - nothing to do.
-                *      2) Flow control - wait for untrusted pagers to catch up.
+                *      We are done if we have met our target *and*
+                *      nobody is still waiting for a page.
                 */
+               if (vm_page_free_count + local_freed >= vm_page_free_target) {
+                       if (object != NULL) {
+                               vm_object_unlock(object);
+                               object = NULL;
+                       }
+                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
-               if (queue_empty(&vm_page_queue_inactive) ||
-                   ((--loop_detect) == 0)      ||
-                   (burst_count >= vm_pageout_burst_max)) {
-                       unsigned int pages, msecs;
-                       int wait_result;
-
-                       consider_machine_adjust();
+                       if (local_freeq) {
+                               vm_page_unlock_queues();
+                               vm_page_free_list(local_freeq, TRUE);
+                                       
+                               local_freeq = NULL;
+                               local_freed = 0;
+                               vm_page_lock_queues();
+                       }
                        /*
-                        *      vm_pageout_burst_wait is msecs/page.
-                        *      If there is nothing for us to do, we wait
-                        *      at least vm_pageout_empty_wait msecs.
+                        * inactive target still not met... keep going
+                        * until we get the queues balanced
                         */
-                       pages = burst_count;
-       
-                       if (loop_detect == 0) {
-                               printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n");
-                               msecs = vm_free_page_pause;
-                       }
-                       else {
-                               msecs = burst_count * vm_pageout_burst_wait;
-                       }
-
-                       if (queue_empty(&vm_page_queue_inactive) &&
-                           (msecs < vm_pageout_empty_wait))
-                               msecs = vm_pageout_empty_wait;
-                       vm_page_unlock_queues();
-                       assert_wait_timeout(msecs, THREAD_INTERRUPTIBLE);
-                       counter(c_vm_pageout_scan_block++);
 
                        /*
-                        *      Unfortunately, we don't have call_continuation
-                        *      so we can't rely on tail-recursion.
+                        *      Recalculate vm_page_inactive_target.
                         */
-                       wait_result = thread_block((void (*)(void)) 0);
-                       if (wait_result != THREAD_TIMED_OUT)
-                               thread_cancel_timer();
-                       vm_pageout_scan_continue();
-                       goto Restart;
-                       /*NOTREACHED*/
-               }
+                       vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+                                                                         vm_page_inactive_count +
+                                                                         vm_page_speculative_count);
 
-               vm_pageout_inactive++;
-               m = (vm_page_t) queue_first(&vm_page_queue_inactive);
-
-               if ((vm_page_free_count <= vm_page_free_reserved) && 
-                               (IP_VALID(memory_manager_default))) {
+#ifndef        CONFIG_EMBEDDED
                        /*
-                        * We're really low on memory. Try to select a page that
-                        * would go directly to the default_pager.
-                        * If there are no such pages, we have to page out a 
-                        * page backed by an EMM, so that the default_pager
-                        * can recover it eventually.
+                        * XXX: if no active pages can be reclaimed, pageout scan can be stuck trying 
+                        *      to balance the queues
                         */
-                       vm_pageout_scan_inactive_emm_throttle++;
-                       do {
-                               assert(!m->active && m->inactive);
-                               object = m->object;
+                       if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
+                           !queue_empty(&vm_page_queue_active))
+                               continue;
+#endif
 
-                               if (vm_object_lock_try(object)) {
-#if 0
-                                       if (object->pager_trusted ||
-                                           object->internal) {
-                                               /* found one ! */
-                                               vm_pageout_scan_inactive_emm_throttle_success++;
-                                               goto object_locked_inactive;
-                                       }
-#else
-                               vm_pageout_scan_inactive_emm_throttle_success++;
-                                               goto object_locked_inactive;
-#endif /* 0 */
-                                       vm_object_unlock(object);
-                               }
-                               m = (vm_page_t) queue_next(&m->pageq);
-                       } while (!queue_end(&vm_page_queue_inactive,
-                                           (queue_entry_t) m));
-                       if (queue_end(&vm_page_queue_inactive,
-                                     (queue_entry_t) m)) {
-                               vm_pageout_scan_inactive_emm_throttle_failure++;
+                       lck_mtx_lock(&vm_page_queue_free_lock);
+
+                       if ((vm_page_free_count >= vm_page_free_target) &&
+                           (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+
+                               vm_page_unlock_queues();
+
+                               thread_wakeup((event_t) &vm_pageout_garbage_collect);
+
+                               assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+                               return;
+                       }
+                       lck_mtx_unlock(&vm_page_queue_free_lock);
+               }
+               
+               /*
+                * Before anything, we check if we have any ripe volatile 
+                * objects around. If so, try to purge the first object.
+                * If the purge fails, fall through to reclaim a page instead.
+                * If the purge succeeds, go back to the top and reevaluate
+                * the new memory situation.
+                */
+               assert(available_for_purge >= 0);
+               if (available_for_purge) {
+                       if (object != NULL) {
+                               vm_object_unlock(object);
+                               object = NULL;
+                       }
+                       if (vm_purgeable_object_purge_one() == TRUE) {
+                               continue;
+                       }
+               }
+        
+               if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
+                       /*
+                        * try to pull pages from the aging bins
+                        * see vm_page.h for an explanation of how
+                        * this mechanism works
+                        */
+                       struct vm_speculative_age_q     *aq;
+                       mach_timespec_t ts_fully_aged;
+                       boolean_t       can_steal = FALSE;
+                       int num_scanned_queues;
+                      
+                       aq = &vm_page_queue_speculative[speculative_steal_index];
+
+                       num_scanned_queues = 0;
+                       while (queue_empty(&aq->age_q) &&
+                              num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
+
+                               speculative_steal_index++;
+
+                               if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
+                                       speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+                               
+                               aq = &vm_page_queue_speculative[speculative_steal_index];
+                       }
+
+                       if (num_scanned_queues ==
+                           VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
                                /*
-                                * We should check the "active" queue
-                                * for good candidates to page out.
+                                * XXX We've scanned all the speculative
+                                * queues but still haven't found one
+                                * that is not empty, even though
+                                * vm_page_speculative_count is not 0.
                                 */
-                               need_more_inactive_pages = TRUE;
+                               /* report the anomaly... */
+                               printf("vm_pageout_scan: "
+                                      "all speculative queues empty "
+                                      "but count=%d.  Re-adjusting.\n",
+                                      vm_page_speculative_count);
+                               if (vm_page_speculative_count >
+                                   vm_page_speculative_count_drift_max)
+                                       vm_page_speculative_count_drift_max = vm_page_speculative_count;
+                               vm_page_speculative_count_drifts++;
+#if 6553678
+                               Debugger("vm_pageout_scan: no speculative pages");
+#endif
+                               /* readjust... */
+                               vm_page_speculative_count = 0;
+                               /* ... and continue */
+                               continue;
+                       }
+
+                       if (vm_page_speculative_count > vm_page_speculative_target)
+                               can_steal = TRUE;
+                       else {
+                               ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) / 1000;
+                               ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) % 1000)
+                                                     * 1000 * NSEC_PER_USEC;
+
+                               ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
 
-                               m = (vm_page_t)
-                                       queue_first(&vm_page_queue_inactive);
+                               clock_sec_t sec;
+                               clock_nsec_t nsec;
+                               clock_get_system_nanotime(&sec, &nsec);
+                               ts.tv_sec = (unsigned int) sec;
+                               ts.tv_nsec = nsec;
+
+                               if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
+                                       can_steal = TRUE;
                        }
+                       if (can_steal == TRUE)
+                               vm_page_speculate_ageit(aq);
                }
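+               /*
+                * Worked example (editor's addition; the constants are
+                * defined in vm_page.h and these values are assumed
+                * only for illustration): with
+                * VM_PAGE_MAX_SPECULATIVE_AGE_Q = 10 and
+                * VM_PAGE_SPECULATIVE_Q_AGE_MS = 500, ts_fully_aged
+                * above is aq->age_ts + 5 seconds, so a bin that is
+                * not over target may only be stolen from once it has
+                * aged at least 5 seconds.
+                */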
 
-               assert(!m->active && m->inactive);
-               object = m->object;
-
                /*
-                *      Try to lock object; since we've got the
-                *      page queues lock, we can only try for this one.
+                * Sometimes we have to pause:
+                *      1) No inactive pages - nothing to do.
+                *      2) Flow control - default pageout queue is full
+                *      3) Loop control - no acceptable pages found on the inactive queue
+                *         within the last vm_pageout_burst_inactive_throttle iterations
                 */
+               if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf) && queue_empty(&sq->age_q) &&
+                   (VM_PAGE_Q_THROTTLED(iq) || queue_empty(&vm_page_queue_throttled))) {
+                       vm_pageout_scan_empty_throttle++;
+                       msecs = vm_pageout_empty_wait;
+                       goto vm_pageout_scan_delay;
+
+               } else if (inactive_burst_count >= 
+                          MIN(vm_pageout_burst_inactive_throttle,
+                              (vm_page_inactive_count +
+                               vm_page_speculative_count))) {
+                       vm_pageout_scan_burst_throttle++;
+                       msecs = vm_pageout_burst_wait;
+                       goto vm_pageout_scan_delay;
+
+               } else if (VM_PAGE_Q_THROTTLED(iq) && IP_VALID(memory_manager_default)) {
+                       clock_sec_t sec;
+                       clock_nsec_t nsec;
+
+                       switch (flow_control.state) {
+
+                       case FCS_IDLE:
+reset_deadlock_timer:
+                               ts.tv_sec = vm_pageout_deadlock_wait / 1000;
+                               ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
+                               clock_get_system_nanotime(&sec, &nsec);
+                               flow_control.ts.tv_sec = (unsigned int) sec;
+                               flow_control.ts.tv_nsec = nsec;
+                               ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
+                               
+                               flow_control.state = FCS_DELAYED;
+                               msecs = vm_pageout_deadlock_wait;
+
+                               break;
+                                       
+                       case FCS_DELAYED:
+                               clock_get_system_nanotime(&sec, &nsec);
+                               ts.tv_sec = (unsigned int) sec;
+                               ts.tv_nsec = nsec;
+
+                               if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
+                                       /*
+                                        * the pageout thread for the default pager is potentially
+                                        * deadlocked since the 
+                                        * default pager queue has been throttled for more than the
+                                        * allowable time... we need to move some clean pages or dirty
+                                        * pages belonging to the external pagers if they aren't throttled
+                                        * vm_page_free_wanted represents the number of threads currently
+                                        * blocked waiting for pages... we'll move one page for each of
+                                        * these plus a fixed amount to break the logjam... once we're done
+                                        * moving this number of pages, we'll re-enter the FCS_DELAYED state
+                                        * with a new timeout target since we have no way of knowing 
+                                        * whether we've broken the deadlock except through observation
+                                        * of the queue associated with the default pager... we need to
+                                        * stop moving pages and allow the system to run to see what
+                                        * state it settles into.
+                                        */
+                                       vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
+                                       vm_pageout_scan_deadlock_detected++;
+                                       flow_control.state = FCS_DEADLOCK_DETECTED;
+
+                                       thread_wakeup((event_t) &vm_pageout_garbage_collect);
+                                       goto consider_inactive;
+                               }
+                               /*
+                                * just resniff instead of trying
+                                * to compute a new delay time... we're going to be
+                                * awakened immediately upon a laundry completion,
+                                * so we won't wait any longer than necessary
+                                */
+                               msecs = vm_pageout_idle_wait;
+                               break;
+
+                       case FCS_DEADLOCK_DETECTED:
+                               if (vm_pageout_deadlock_target)
+                                       goto consider_inactive;
+                               goto reset_deadlock_timer;
+
+                       }
+                       vm_pageout_scan_throttle++;
+                       iq->pgo_throttled = TRUE;
+vm_pageout_scan_delay:
+                       if (object != NULL) {
+                               vm_object_unlock(object);
+                               object = NULL;
+                       }
+                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+                       if (local_freeq) {
+                               vm_page_unlock_queues();
+                               vm_page_free_list(local_freeq, TRUE);
+                                       
+                               local_freeq = NULL;
+                               local_freed = 0;
+                               vm_page_lock_queues();
+
+                               if (flow_control.state == FCS_DELAYED &&
+                                   !VM_PAGE_Q_THROTTLED(iq)) {
+                                       flow_control.state = FCS_IDLE;
+                                       vm_pageout_scan_throttle_aborted++;
+                                       goto consider_inactive;
+                               }
+                       }
+#if CONFIG_EMBEDDED
+                       {
+                       int percent_avail;
 
-               if (!vm_object_lock_try(object)) {
                        /*
-                        *      Move page to end and continue.
+                        * Decide if we need to send a memory status notification.
                         */
-                       queue_remove(&vm_page_queue_inactive, m,
-                                    vm_page_t, pageq);
-                       queue_enter(&vm_page_queue_inactive, m,
-                                   vm_page_t, pageq);
+                       percent_avail = 
+                               (vm_page_active_count + vm_page_inactive_count + 
+                                vm_page_speculative_count + vm_page_free_count +
+                                (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
+                               atop_64(max_mem);
+                       if (percent_avail >= (kern_memorystatus_level + 5) || 
+                           percent_avail <= (kern_memorystatus_level - 5)) {
+                               kern_memorystatus_level = percent_avail;
+                               thread_wakeup((event_t)&kern_memorystatus_wakeup);
+                       }
+                       }
+#endif
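+                       /*
+                        * Worked example (editor's addition): with
+                        * kern_memorystatus_level at 40, a percent_avail
+                        * of 45 or more, or of 35 or less, updates the
+                        * level and wakes the memorystatus thread; values
+                        * in between are ignored, which keeps small
+                        * fluctuations from causing notification churn.
+                        */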
+                       assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
+                       counter(c_vm_pageout_scan_block++);
+
                        vm_page_unlock_queues();
-                       mutex_pause();
-                       vm_pageout_inactive_nolock++;
-                       continue;
+
+                       assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+                       thread_block(THREAD_CONTINUE_NULL);
+
+                       vm_page_lock_queues();
+                       delayed_unlock = 1;
+
+                       iq->pgo_throttled = FALSE;
+
+                       if (loop_count >= vm_page_inactive_count)
+                               loop_count = 0;
+                       inactive_burst_count = 0;
+
+                       goto Restart;
+                       /*NOTREACHED*/
+               }
+
+
+               flow_control.state = FCS_IDLE;
+consider_inactive:
+               loop_count++;
+               inactive_burst_count++;
+               vm_pageout_inactive++;
+
+               /* Choose a victim. */
+               
+               while (1) {     
+                       m = NULL;
+                       
+                       if (IP_VALID(memory_manager_default)) {
+                               assert(vm_page_throttled_count == 0);
+                               assert(queue_empty(&vm_page_queue_throttled));
+                       }
+
+                       /*
+                        * The most eligible pages are ones we paged in speculatively,
+                        * but which have not yet been touched.
+                        */
+                       if ( !queue_empty(&sq->age_q) ) {
+                               m = (vm_page_t) queue_first(&sq->age_q);
+                               break;
+                       }
+                       /*
+                        * Time for a zero-filled inactive page?
+                        */
+                       if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
+                            queue_empty(&vm_page_queue_inactive)) {
+                               if ( !queue_empty(&vm_page_queue_zf) ) {
+                                       m = (vm_page_t) queue_first(&vm_page_queue_zf);
+                                       zf_run_count++;
+                                       break;
+                               }
+                       }
+                       /*
+                        * It's either a normal inactive page or nothing.
+                        */
+                        if ( !queue_empty(&vm_page_queue_inactive) ) {
+                                m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+                                zf_run_count = 0;
+                               break;
+                        }
+
+                        panic("vm_pageout: no victim");
+               }
+
+               assert(!m->active && (m->inactive || m->speculative || m->throttled));
+               assert(!m->laundry);
+               assert(m->object != kernel_object);
+               assert(m->phys_page != vm_page_guard_addr);
+
+               if (!m->speculative) {
+                       vm_pageout_stats[vm_pageout_stat_now].considered++;
                }
 
-           object_locked_inactive:
+               DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
                /*
-                *      Paging out pages of objects which pager is being
-                *      created by another thread must be avoided, because
-                *      this thread may claim for memory, thus leading to a
-                *      possible dead lock between it and the pageout thread
-                *      which will wait for pager creation, if such pages are
-                *      finally chosen. The remaining assumption is that there
-                *      will finally be enough available pages in the inactive
-                *      pool to page out in order to satisfy all memory claimed
-                *      by the thread which concurrently creates the pager.
+                * check to see if we currently are working
+                * with the same object... if so, we've
+                * already got the lock
                 */
+               if (m->object != object) {
+                       /*
+                        * the object associated with candidate page is 
+                        * different from the one we were just working
+                        * with... dump the lock if we still own it
+                        */
+                       if (object != NULL) {
+                               vm_object_unlock(object);
+                               object = NULL;
+                               vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+                       }
+                       /*
+                        * Try to lock object; since we've already got the
+                        * page queues lock, we can only 'try' for this one.
+                        * if the 'try' fails, we need to do a mutex_pause
+                        * to allow the owner of the object lock a chance to
+                        * run... otherwise, we're likely to trip over this
+                        * object in the same state as we work our way through
+                        * the queue... clumps of pages associated with the same
+                        * object are fairly typical on the inactive and active queues
+                        */
+                       if (!vm_object_lock_try_scan(m->object)) {
+                               vm_pageout_inactive_nolock++;
+
+                       requeue_page:
+                               /*
+                                *      Move page to end and continue.
+                                *      Don't re-issue ticket
+                                */
+                               if (m->zero_fill) {
+                                       if (m->speculative) {
+                                               panic("vm_pageout_scan(): page %p speculative and zero-fill !?\n", m);
+                                       }
+                                       assert(!m->speculative);
+                                       queue_remove(&vm_page_queue_zf, m,
+                                                    vm_page_t, pageq);
+                                       queue_enter(&vm_page_queue_zf, m,
+                                                   vm_page_t, pageq);
+                               } else if (m->speculative) {
+                                       remque(&m->pageq);
+                                       m->speculative = FALSE;
+                                       vm_page_speculative_count--;
+                                       
+                                       /*
+                                        * move to the head of the inactive queue
+                                        * to get it out of the way... the speculative
+                                        * queue is generally too small to depend
+                                        * on there being enough pages from other
+                                        * objects to make cycling it back on the
+                                        * same queue a winning proposition
+                                        */
+                                       queue_enter_first(&vm_page_queue_inactive, m,
+                                                         vm_page_t, pageq);
+                                       m->inactive = TRUE;
+                                       vm_page_inactive_count++;
+                                       token_new_pagecount++;
+                               }  else if (m->throttled) {
+                                       queue_remove(&vm_page_queue_throttled, m,
+                                                    vm_page_t, pageq);
+                                       m->throttled = FALSE;
+                                       vm_page_throttled_count--;
+                                       
+                                       /*
+                                        * not throttled any more, so can stick
+                                        * it on the inactive queue.
+                                        */
+                                       queue_enter(&vm_page_queue_inactive, m,
+                                                   vm_page_t, pageq);
+                                       m->inactive = TRUE;
+                                       vm_page_inactive_count++;
+                                       token_new_pagecount++;
+                               } else {
+                                       queue_remove(&vm_page_queue_inactive, m,
+                                                    vm_page_t, pageq);
+#if MACH_ASSERT
+                                       vm_page_inactive_count--;       /* balance for purgeable queue asserts */
+#endif
+                                       vm_purgeable_q_advance_all();
+
+                                       queue_enter(&vm_page_queue_inactive, m,
+                                                   vm_page_t, pageq);
+#if MACH_ASSERT
+                                       vm_page_inactive_count++;       /* balance for purgeable queue asserts */
+#endif
+                                       token_new_pagecount++;
+                               }
+                               pmap_clear_reference(m->phys_page);
+                               m->reference = FALSE;
+
+                               if ( !queue_empty(&sq->age_q) )
+                                       m = (vm_page_t) queue_first(&sq->age_q);
+                               else if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
+                                         queue_empty(&vm_page_queue_inactive)) {
+                                       if ( !queue_empty(&vm_page_queue_zf) )
+                                               m = (vm_page_t) queue_first(&vm_page_queue_zf);
+                               } else if ( !queue_empty(&vm_page_queue_inactive) ) {
+                                       m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+                               }
+                               /*
+                                * this is the next object we're going to be interested in
+                                * try to make sure its available after the mutex_yield
+                                * returns control
+                                */
+                               vm_pageout_scan_wants_object = m->object;
+
+                               /*
+                                * force us to dump any collected free pages
+                                * and to pause before moving on
+                                */
+                               try_failed = TRUE;
+
+                               goto done_with_inactivepage;
+                       }
+                       object = m->object;
+                       vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+                       try_failed = FALSE;
+               }
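+               /*
+                * The out-of-order locking idiom above, in miniature
+                * (a schematic sketch only; the real branches carry the
+                * requeue details):
+                *
+                *      // page queues lock already held
+                *      if (!vm_object_lock_try_scan(m->object)) {
+                *              // requeue m at the tail of its queue,
+                *              // note the object we want next, and set
+                *              // try_failed so we yield the queues lock
+                *              // before retrying
+                *      } else {
+                *              object = m->object;
+                *              try_failed = FALSE;
+                *      }
+                */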
 
+               /*
+                *      Paging out pages of external objects which
+                *      are currently being created must be avoided.
+                *      The pager may claim memory, leading to a possible
+                *      deadlock between it and the pageout thread, if such
+                *      pages are finally chosen. The remaining assumption
+                *      is that there will eventually be enough available
+                *      pages in the inactive pool to page out, satisfying
+                *      all memory claimed by the thread which concurrently
+                *      creates the pager.
+                */
                if (!object->pager_initialized && object->pager_created) {
                        /*
                         *      Move page to end and continue, hoping that
                         *      there will be enough other inactive pages to
                         *      page out so that the thread which currently
                         *      initializes the pager will succeed.
+                        *      Don't re-grant the ticket; the page should
+                        *      be pulled from the queue and paged out
+                        *      whenever one of its logically adjacent
+                        *      fellows is targeted.
                         */
-                       queue_remove(&vm_page_queue_inactive, m,
-                                    vm_page_t, pageq);
-                       queue_enter(&vm_page_queue_inactive, m,
-                                   vm_page_t, pageq);
-                       vm_page_unlock_queues();
-                       vm_object_unlock(object);
                        vm_pageout_inactive_avoid++;
-                       continue;
+                       goto requeue_page;
                }
-
                /*
-                *      Remove the page from the inactive list.
+                *      Remove the page from its list.
                 */
+               if (m->speculative) {
+                       remque(&m->pageq);
+                       page_prev_state = PAGE_STATE_SPECULATIVE;
+                       m->speculative = FALSE;
+                       vm_page_speculative_count--;
+               } else if (m->throttled) {
+                       queue_remove(&vm_page_queue_throttled, m, vm_page_t, pageq);
+                       page_prev_state = PAGE_STATE_THROTTLED;
+                       m->throttled = FALSE;
+                       vm_page_throttled_count--;
+               } else {
+                       if (m->zero_fill) {
+                               queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
+                               page_prev_state = PAGE_STATE_ZEROFILL;
+                               vm_zf_queue_count--;
+                       } else {
+                               page_prev_state = PAGE_STATE_INACTIVE;
+                               queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
+                       }
+                       m->inactive = FALSE;
+                       if (!m->fictitious)
+                               vm_page_inactive_count--;
+                       vm_purgeable_q_advance_all();
+               }
+
+               m->pageq.next = NULL;
+               m->pageq.prev = NULL;
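+               /*
+                * The removals above use the Mach intrusive-queue macros
+                * from <kern/queue.h>; a minimal usage sketch (q and m are
+                * placeholders):
+                *
+                *      queue_head_t q;
+                *      queue_init(&q);
+                *      queue_enter(&q, m, vm_page_t, pageq);       // append m
+                *      queue_remove(&q, m, vm_page_t, pageq);      // unlink m
+                *      queue_enter_first(&q, m, vm_page_t, pageq); // re-add at head
+                */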
 
-               queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
-               m->inactive = FALSE;
-               if (!m->fictitious)
-                       vm_page_inactive_count--;
+               if ( !m->fictitious && catch_up_count)
+                       catch_up_count--;
 
-               if (m->busy || !object->alive) {
+               /*
+                * ENCRYPTED SWAP:
+                * if this page has already been picked up as part of a
+                * page-out cluster, it will be busy because it is being
+                * encrypted (see vm_object_upl_request()).  But we still
+                * want to demote it from "clean-in-place" (aka "adjacent")
+                * to "clean-and-free" (aka "target"), so let's ignore its
+                * "busy" bit here and proceed to check for "cleaning" a
+                * little bit below...
+                */
+               if ( !m->encrypted_cleaning && (m->busy || !object->alive)) {
                        /*
                         *      Somebody is already playing with this page.
                         *      Leave it off the pageout queues.
+                        *
                         */
-
-                       vm_page_unlock_queues();
-                       vm_object_unlock(object);
                        vm_pageout_inactive_busy++;
-                       continue;
+
+                       goto done_with_inactivepage;
                }
 
                /*
@@ -1762,11 +1924,44 @@ vm_pageout_scan(void)
 
                if (m->absent || m->error) {
                        vm_pageout_inactive_absent++;
-                   reclaim_page:
-                       vm_page_free(m);
-                       vm_page_unlock_queues();
-                       vm_object_unlock(object);
-                       continue;
+reclaim_page:
+                       if (vm_pageout_deadlock_target) {
+                               vm_pageout_scan_inactive_throttle_success++;
+                               vm_pageout_deadlock_target--;
+                       }
+
+                       DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
+
+                       if (object->internal) {
+                               DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
+                       } else {
+                               DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
+                       }
+                       vm_page_free_prepare_queues(m);
+
+                       /*
+                        * remove page from object here since we're already
+                        * behind the object lock... defer the rest of the work
+                        * we'd normally do in vm_page_free_prepare_object
+                        * until 'vm_page_free_list' is called
+                        */
+                       if (m->tabled)
+                               vm_page_remove(m, TRUE);
+
+                       assert(m->pageq.next == NULL &&
+                              m->pageq.prev == NULL);
+                       m->pageq.next = (queue_entry_t)local_freeq;
+                       local_freeq = m;
+                       local_freed++;
+
+                       inactive_burst_count = 0;
+
+                       if(page_prev_state != PAGE_STATE_SPECULATIVE) {
+                               vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
+                               page_prev_state = 0;
+                       }
+
+                       goto done_with_inactivepage;
                }
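+               /*
+                * Freed pages are chained through pageq.next onto the
+                * thread-local 'local_freeq' rather than freed one at a
+                * time; done_with_inactivepage later drains the batch,
+                * essentially:
+                *
+                *      vm_page_unlock_queues();
+                *      vm_page_free_list(local_freeq, TRUE);
+                *      local_freeq = NULL;
+                *      local_freed = 0;
+                *      vm_page_lock_queues();
+                */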
 
                assert(!m->private);
@@ -1780,231 +1975,307 @@ vm_pageout_scan(void)
                 */
 
                if (m->cleaning) {
-#if    MACH_CLUSTER_STATS
-                       vm_pageout_cluster_conversions++;
-#endif
-                       if (m->prep_pin_count == 0) {
+                       m->busy = TRUE;
+                       m->pageout = TRUE;
+                       m->dump_cleaning = TRUE;
+                       vm_page_wire(m);
+
+                       CLUSTER_STAT(vm_pageout_cluster_conversions++);
+
+                       inactive_burst_count = 0;
+
+                       goto done_with_inactivepage;
+               }
+
+               /*
+                * If the object is empty, the page must be reclaimed even
+                * if dirty or used.
+                * If the page belongs to a volatile object, we stick it back
+                * on.
+                */
+               if (object->copy == VM_OBJECT_NULL) {
+                       if (object->purgable == VM_PURGABLE_EMPTY) {
                                m->busy = TRUE;
-                               m->pageout = TRUE;
-                               vm_page_wire(m);
+                               if (m->pmapped == TRUE) {
+                                       /* unmap the page */
+                                       refmod_state = pmap_disconnect(m->phys_page);
+                                       if (refmod_state & VM_MEM_MODIFIED) {
+                                               m->dirty = TRUE;
+                                       }
+                               }
+                               if (m->dirty || m->precious) {
+                                       /* we saved the cost of cleaning this page! */
+                                       vm_page_purged_count++;
+                               }
+                               goto reclaim_page;
+                       }
+                       if (object->purgable == VM_PURGABLE_VOLATILE) {
+                               /* if it's wired, we can't put it on our queue */
+                               assert(!VM_PAGE_WIRED(m));
+                               /* just stick it back on! */
+                               goto reactivate_page;
                        }
-                       vm_object_unlock(object);
-                       vm_page_unlock_queues();
-                       continue;
                }
 
                /*
                 *      If it's being used, reactivate.
                 *      (Fictitious pages are either busy or absent.)
+                *      First, update the reference and dirty bits
+                *      to make sure the page is unreferenced.
                 */
-
-               if (m->reference || pmap_is_referenced(m->phys_addr)) {
-                       vm_pageout_inactive_used++;
-                   reactivate_page:
-#if    ADVISORY_PAGEOUT
-                       if (m->discard_request) {
-                               m->discard_request = FALSE;
-                       }
-#endif /* ADVISORY_PAGEOUT */
-                       vm_object_unlock(object);
-                       vm_page_activate(m);
-                       VM_STAT(reactivations++);
-                       vm_page_unlock_queues();
-                       continue;
+               refmod_state = -1;
+
+               if (m->reference == FALSE && m->pmapped == TRUE) {
+                       refmod_state = pmap_get_refmod(m->phys_page);
+                 
+                       if (refmod_state & VM_MEM_REFERENCED)
+                               m->reference = TRUE;
+                       if (refmod_state & VM_MEM_MODIFIED)
+                               m->dirty = TRUE;
                }
 
-               if (m->prep_pin_count != 0) {
-                       boolean_t pinned = FALSE;
+               if (m->reference || m->dirty) {
+                       /* deal with a rogue "reusable" page */
+                       VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
+               }
 
-                       vm_page_pin_lock();
-                       if (m->pin_count != 0) {
-                               /* skip and reactivate pinned page */
-                               pinned = TRUE;
-                               vm_pageout_inactive_pinned++;
+               if (m->reference && !m->no_cache) {
+                       /*
+                        * The page we pulled off the inactive list has
+                        * been referenced.  It is possible for other
+                        * processors to be touching pages faster than we
+                        * can clear the referenced bit and traverse the
+                        * inactive queue, so we limit the number of
+                        * reactivations.
+                        */
+                       if (++reactivated_this_call >= reactivate_limit) {
+                               vm_pageout_reactivation_limit_exceeded++;
+                       } else if (catch_up_count) {
+                               vm_pageout_catch_ups++;
+                       } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
+                               vm_pageout_inactive_force_reclaim++;
                        } else {
-                               /* page is prepped; send it into limbo */
-                               m->limbo = TRUE;
-                               vm_pageout_inactive_limbo++;
+                               uint32_t isinuse;
+reactivate_page:
+                               if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
+                                    vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
+                                       /*
+                                        * no explicit mappings of this object exist
+                                        * and it's not open via the filesystem
+                                        */
+                                       vm_page_deactivate(m);
+                                       vm_pageout_inactive_deactivated++;
+                               } else {
+                                       /*
+                                        * The page was/is being used, so put back on active list.
+                                        */
+                                       vm_page_activate(m);
+                                       VM_STAT_INCR(reactivations);
+                               }
+                               vm_pageout_inactive_used++;
+                               inactive_burst_count = 0;
+
+                                goto done_with_inactivepage;
                        }
-                       vm_page_pin_unlock();
-                       if (pinned)
-                               goto reactivate_page;
+                       /* 
+                        * Make sure we call pmap_get_refmod() if it
+                        * wasn't already called just above, to update
+                        * the dirty bit.
+                        */
+                       if ((refmod_state == -1) && !m->dirty && m->pmapped) {
+                               refmod_state = pmap_get_refmod(m->phys_page);
+                               if (refmod_state & VM_MEM_MODIFIED)
+                                       m->dirty = TRUE;
+                       }
+                       forced_reclaim = TRUE;
+               } else {
+                       forced_reclaim = FALSE;
                }
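+               /*
+                * Net effect of the referenced-page policy above, as
+                * pseudo-code (names as used in this function):
+                *
+                *      if (m->reference && !m->no_cache) {
+                *              if (++reactivated_this_call < reactivate_limit &&
+                *                  catch_up_count == 0 &&
+                *                  ++inactive_reclaim_run < VM_PAGEOUT_INACTIVE_FORCE_RECLAIM)
+                *                      reactivate (or deactivate) m and move on;
+                *              else
+                *                      fall through and reclaim m anyway;
+                *      }
+                */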
 
-#if    ADVISORY_PAGEOUT
-               if (object->advisory_pageout) {
-                       boolean_t               do_throttle;
-                       ipc_port_t              port;
-                       vm_object_offset_t      discard_offset;
+                XPR(XPR_VM_PAGEOUT,
+                "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
+                object, m->offset, m, 0,0);
 
-                       if (m->discard_request) {
-                               vm_stat_discard_failure++;
-                               goto mandatory_pageout;
-                       }
+               /*
+                * we've got a candidate page to steal...
+                *
+                * m->dirty is up to date courtesy of the
+                * preceding check for m->reference... if
+                * we get here, then m->reference had to be
+                * FALSE (or possibly "reactivate_limit" was
+                * exceeded), but in either case we called
+                * pmap_get_refmod() and updated both
+                * m->reference and m->dirty
+                *
+                * if it's dirty or precious we need to
+                * see if the target queue is throttled;
+                * if it is, we need to skip over this page by
+                * moving it back to the end of the inactive queue
+                */
 
-                       assert(object->pager_initialized);
-                       m->discard_request = TRUE;
-                       port = object->pager;
+               inactive_throttled = FALSE;
 
-                       /* system-wide throttle */
-                       do_throttle = (vm_page_free_count <=
-                                      vm_page_free_reserved);
-                       if (!do_throttle) {
-                               /* throttle on this pager */
-                               /* XXX lock ordering ? */
-                               ip_lock(port);
-                               do_throttle= imq_full(&port->ip_messages);
-                               ip_unlock(port);
+               if (m->dirty || m->precious) {
+                       if (object->internal) {
+                               if (VM_PAGE_Q_THROTTLED(iq))
+                                       inactive_throttled = TRUE;
+                       } else if (VM_PAGE_Q_THROTTLED(eq)) {
+                               inactive_throttled = TRUE;
                        }
-                       if (do_throttle) {
-                               vm_stat_discard_throttle++;
-#if 0
-                               /* ignore this page and skip to next */
-                               vm_page_unlock_queues();
-                               vm_object_unlock(object);
-                               continue;
-#else
-                               /* force mandatory pageout */
-                               goto mandatory_pageout;
-#endif
+               }
+               if (inactive_throttled == TRUE) {
+throttle_inactive:
+                       if (!IP_VALID(memory_manager_default) &&
+                           object->internal && m->dirty &&
+                           (object->purgable == VM_PURGABLE_DENY ||
+                            object->purgable == VM_PURGABLE_NONVOLATILE ||
+                            object->purgable == VM_PURGABLE_VOLATILE)) {
+                               queue_enter(&vm_page_queue_throttled, m,
+                                           vm_page_t, pageq);
+                               m->throttled = TRUE;
+                               vm_page_throttled_count++;
+                       } else {
+                               if (m->zero_fill) {
+                                       queue_enter(&vm_page_queue_zf, m,
+                                                   vm_page_t, pageq);
+                                       vm_zf_queue_count++;
+                               } else 
+                                       queue_enter(&vm_page_queue_inactive, m,
+                                                   vm_page_t, pageq);
+                               m->inactive = TRUE;
+                               if (!m->fictitious) {
+                                       vm_page_inactive_count++;
+                                       token_new_pagecount++;
+                               }
                        }
-
-                       /* proceed with discard_request */
-                       vm_page_activate(m);
-                       vm_stat_discard++;
-                       VM_STAT(reactivations++);
-                       discard_offset = m->offset + object->paging_offset;
-                       vm_stat_discard_sent++;
-                       vm_page_unlock_queues();
-                       vm_object_unlock(object);
-/*
-                       memory_object_discard_request(object->pager,
-                                                     object->pager_request,
-                                                     discard_offset,
-                                                     PAGE_SIZE);
-*/
-                       continue;
+                       vm_pageout_scan_inactive_throttled++;
+                       goto done_with_inactivepage;
                }
-       mandatory_pageout:
-#endif /* ADVISORY_PAGEOUT */
-                       
-                XPR(XPR_VM_PAGEOUT,
-                "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
-                (integer_t)object, (integer_t)m->offset, (integer_t)m, 0,0);
 
                /*
-                *      Eliminate all mappings.
+                * we've got a page that we can steal...
+                * eliminate all mappings and make sure
+                * we have the up-to-date modified state
+                * first take the page BUSY, so that no new
+                * mappings can be made
                 */
-
                m->busy = TRUE;
-               pmap_page_protect(m->phys_addr, VM_PROT_NONE);
-               if (!m->dirty)
-                       m->dirty = pmap_is_modified(m->phys_addr);
+               
+               /*
+                * if we need to do a pmap_disconnect then we
+                * need to re-evaluate m->dirty since the pmap_disconnect
+                * provides the true state atomically... the 
+                * page was still mapped up to the pmap_disconnect
+                * and may have been dirtied at the last microsecond
+                *
+                * we also check for the page being referenced 'late'
+                * if it was, we first need to do a WAKEUP_DONE on it
+                * since we already set m->busy = TRUE, before 
+                * going off to reactivate it
+                *
+                * Note that if 'pmapped' is FALSE then the page has never
+                * been entered into any pmap, so there is no point calling
+                * pmap_disconnect().  m->dirty and/or m->reference could
+                * have been set in anticipation of likely usage of the page.
+                */
+               if (m->pmapped == TRUE) {
+                       refmod_state = pmap_disconnect(m->phys_page);
+
+                       if (refmod_state & VM_MEM_MODIFIED)
+                               m->dirty = TRUE;
+                       if (refmod_state & VM_MEM_REFERENCED) {
+                               
+                               /* If m->reference is already set, this page must have
+                                * already failed the reactivate_limit test, so don't
+                                * bump the counts twice.
+                                */
+                               if ( ! m->reference ) {
+                                       m->reference = TRUE;
+                                       if (forced_reclaim ||
+                                           ++reactivated_this_call >= reactivate_limit)
+                                               vm_pageout_reactivation_limit_exceeded++;
+                                       else {
+                                               PAGE_WAKEUP_DONE(m);
+                                               goto reactivate_page;
+                                       }
+                               }
+                       }
+               }
+               /*
+                * reset our count of pages that have been reclaimed 
+                * since the last page was 'stolen'
+                */
+               inactive_reclaim_run = 0;
 
                /*
                 *      If it's clean and not precious, we can free the page.
                 */
-
                if (!m->dirty && !m->precious) {
+                       if (m->zero_fill)
+                               vm_pageout_inactive_zf++;
                        vm_pageout_inactive_clean++;
+
                        goto reclaim_page;
                }
-               vm_page_unlock_queues();
 
                /*
-                *      If there is no memory object for the page, create
-                *      one and hand it to the default pager.
+                * The page may have been dirtied since the last check
+                * for a throttled target queue (which may have been skipped
+                * if the page was clean then).  With the dirty page
+                * disconnected here, we can make one final check.
                 */
+               {
+                       boolean_t disconnect_throttled = FALSE;
+                       if (object->internal) {
+                               if (VM_PAGE_Q_THROTTLED(iq))
+                                       disconnect_throttled = TRUE;
+                       } else if (VM_PAGE_Q_THROTTLED(eq)) {
+                               disconnect_throttled = TRUE;
+                       }
 
-               if (!object->pager_initialized)
-                       vm_object_collapse(object);
-               if (!object->pager_initialized)
-                       vm_object_pager_create(object);
-               if (!object->pager_initialized) {
-                       /*
-                        *      Still no pager for the object.
-                        *      Reactivate the page.
-                        *
-                        *      Should only happen if there is no
-                        *      default pager.
-                        */
-                       vm_page_lock_queues();
-                       vm_page_activate(m);
-                       vm_page_unlock_queues();
+                       if (disconnect_throttled == TRUE) {
+                               PAGE_WAKEUP_DONE(m);
+                               goto throttle_inactive;
+                       }
+               }
 
-                       /*
-                        *      And we are done with it.
-                        */
-                       PAGE_WAKEUP_DONE(m);
-                       vm_object_unlock(object);
+               vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
 
-                       /*
-                        * break here to get back to the preemption
-                        * point in the outer loop so that we don't
-                        * spin forever if there is no default pager.
-                        */
-                       vm_pageout_dirty_no_pager++;
-                       /*
-                        * Well there's no pager, but we can still reclaim
-                        * free pages out of the inactive list.  Go back
-                        * to top of loop and look for suitable pages.
-                        */
-                       continue;
-               }
-
-               if (object->pager_initialized && object->pager == IP_NULL) {
-                       /*
-                        * This pager has been destroyed by either
-                        * memory_object_destroy or vm_object_destroy, and
-                        * so there is nowhere for the page to go.
-                        * Just free the page.
-                        */
-                       VM_PAGE_FREE(m);
-                       vm_object_unlock(object);
-                       continue;
-               }
+               vm_pageout_cluster(m);
 
+               if (m->zero_fill)
+                       vm_pageout_inactive_zf++;
                vm_pageout_inactive_dirty++;
-/*
-               if (!object->internal)
-                       burst_count++;
-*/
-               vm_object_paging_begin(object);
-               vm_object_unlock(object);
-               vm_pageout_cluster(m);  /* flush it */
-       }
-       consider_machine_adjust();
-}
 
-counter(unsigned int   c_vm_pageout_scan_continue = 0;)
+               inactive_burst_count = 0;
 
-void
-vm_pageout_scan_continue(void)
-{
-       /*
-        *      We just paused to let the pagers catch up.
-        *      If vm_page_laundry_count is still high,
-        *      then we aren't waiting long enough.
-        *      If we have paused some vm_pageout_pause_max times without
-        *      adjusting vm_pageout_burst_wait, it might be too big,
-        *      so we decrease it.
-        */
+done_with_inactivepage:
+               if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
 
-       vm_page_lock_queues();
-       counter(++c_vm_pageout_scan_continue);
-       if (vm_page_laundry_count > vm_pageout_burst_min) {
-               vm_pageout_burst_wait++;
-               vm_pageout_pause_count = 0;
-       } else if (++vm_pageout_pause_count > vm_pageout_pause_max) {
-               vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4;
-               if (vm_pageout_burst_wait < 1)
-                       vm_pageout_burst_wait = 1;
-               vm_pageout_pause_count = 0;
+                       if (object != NULL) {
+                               vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+                               vm_object_unlock(object);
+                               object = NULL;
+                       }
+                       if (local_freeq) {
+                               vm_page_unlock_queues();
+                               vm_page_free_list(local_freeq, TRUE);
+                               
+                               local_freeq = NULL;
+                               local_freed = 0;
+                               vm_page_lock_queues();
+                       } else
+                               lck_mtx_yield(&vm_page_queue_lock);
+
+                       delayed_unlock = 1;
+               }
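+               /*
+                * Lock amortization, schematically: the queues lock is
+                * held for up to VM_PAGEOUT_DELAYED_UNLOCK_LIMIT
+                * iterations (fewer after a failed object try-lock),
+                * then the scan either drops it to drain local_freeq or
+                * yields it:
+                *
+                *      if (++delayed_unlock > LIMIT || try_failed) {
+                *              drain local_freeq, or
+                *              lck_mtx_yield(&vm_page_queue_lock);
+                *              delayed_unlock = 1;
+                *      }
+                */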
+               /*
+                * back to top of pageout scan loop
+                */
        }
-       vm_page_unlock_queues();
 }
 
-void vm_page_free_reserve(int pages);
+
 int vm_page_free_count_init;
 
 void
@@ -2020,39 +2291,312 @@ vm_page_free_reserve(
        vm_page_free_min = vm_page_free_reserved +
                VM_PAGE_FREE_MIN(free_after_reserve);
 
+       if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
+               vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
+
        vm_page_free_target = vm_page_free_reserved +
                VM_PAGE_FREE_TARGET(free_after_reserve);
 
+       if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
+               vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
+
        if (vm_page_free_target < vm_page_free_min + 5)
                vm_page_free_target = vm_page_free_min + 5;
+
+       vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
+       vm_page_creation_throttle = vm_page_free_target / 2;
 }
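+/*
+ * Worked example with hypothetical numbers: if vm_page_free_target
+ * ends up at 4000 pages, vm_page_throttle_limit becomes
+ * 4000 - 4000/3 = 2667 pages and vm_page_creation_throttle
+ * 4000/2 = 2000 pages, so page creation is throttled well before
+ * the free list falls to the reserved floor.
+ */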
 
 /*
  *     vm_pageout is the high level pageout daemon.
  */
 
+void
+vm_pageout_continue(void)
+{
+       DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
+       vm_pageout_scan_event_counter++;
+       vm_pageout_scan();
+       /* we hold vm_page_queue_free_lock now */
+       assert(vm_page_free_wanted == 0);
+       assert(vm_page_free_wanted_privileged == 0);
+       assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
+       lck_mtx_unlock(&vm_page_queue_free_lock);
+
+       counter(c_vm_pageout_block++);
+       thread_block((thread_continue_t)vm_pageout_continue);
+       /*NOTREACHED*/
+}
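+/*
+ * vm_pageout_continue() is a Mach continuation: thread_block() is
+ * given the function to restart in rather than resuming on the old
+ * kernel stack, so the daemon loops forever without a stack to
+ * preserve.  The general shape (worker_continue and wakeup_event are
+ * hypothetical names):
+ *
+ *	static void
+ *	worker_continue(void)
+ *	{
+ *		do_one_pass();
+ *		assert_wait((event_t) &wakeup_event, THREAD_UNINT);
+ *		thread_block((thread_continue_t) worker_continue);
+ *		// NOTREACHED
+ *	}
+ */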
+
+
+#ifdef FAKE_DEADLOCK
+
+#define FAKE_COUNT     5000
+
+int internal_count = 0;
+int fake_deadlock = 0;
+
+#endif
+
+static void
+vm_pageout_iothread_continue(struct vm_pageout_queue *q)
+{
+       vm_page_t       m = NULL;
+       vm_object_t     object;
+       memory_object_t pager;
+       thread_t        self = current_thread();
+
+       if ((vm_pageout_internal_iothread != THREAD_NULL)
+           && (self == vm_pageout_external_iothread )
+           && (self->options & TH_OPT_VMPRIV))
+               self->options &= ~TH_OPT_VMPRIV;
+
+       vm_page_lockspin_queues();
+
+        while ( !queue_empty(&q->pgo_pending) ) {
+
+                  q->pgo_busy = TRUE;
+                  queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+                  VM_PAGE_CHECK(m);
+                  m->pageout_queue = FALSE;
+                  m->pageq.next = NULL;
+                  m->pageq.prev = NULL;
+                  vm_page_unlock_queues();
+
+#ifdef FAKE_DEADLOCK
+                  if (q == &vm_pageout_queue_internal) {
+                          vm_offset_t addr;
+                          int  pg_count;
+
+                          internal_count++;
+
+                          if ((internal_count == FAKE_COUNT)) {
+
+                                  pg_count = vm_page_free_count + vm_page_free_reserved;
+
+                                  if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
+                                          kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
+                                  }
+                                  internal_count = 0;
+                                  fake_deadlock++;
+                          }
+                  }
+#endif
+                  object = m->object;
+
+                  vm_object_lock(object);
+
+                  if (!object->pager_initialized) {
+
+                          /*
+                           *   If there is no memory object for the page, create
+                           *   one and hand it to the default pager.
+                           */
+
+                          if (!object->pager_initialized)
+                                  vm_object_collapse(object,
+                                                     (vm_object_offset_t) 0,
+                                                     TRUE);
+                          if (!object->pager_initialized)
+                                  vm_object_pager_create(object);
+                          if (!object->pager_initialized) {
+                                  /*
+                                   *   Still no pager for the object.
+                                   *   Reactivate the page.
+                                   *
+                                   *   Should only happen if there is no
+                                   *   default pager.
+                                   */
+                                  vm_page_lockspin_queues();
+
+                                  vm_pageout_queue_steal(m, TRUE);
+                                  vm_pageout_dirty_no_pager++;
+                                  vm_page_activate(m);
+
+                                  vm_page_unlock_queues();
+
+                                  /*
+                                   *   And we are done with it.
+                                   */
+                                  PAGE_WAKEUP_DONE(m);
+
+                                  vm_object_paging_end(object);
+                                  vm_object_unlock(object);
+
+                                  vm_page_lockspin_queues();
+                                  continue;
+                          }
+                  }
+                  pager = object->pager;
+                  if (pager == MEMORY_OBJECT_NULL) {
+                          /*
+                           * This pager has been destroyed by either
+                           * memory_object_destroy or vm_object_destroy, and
+                           * so there is nowhere for the page to go.
+                           */
+                          if (m->pageout) {
+                                  /*
+                                   * Just free the page... VM_PAGE_FREE takes
+                                   * care of cleaning up all the state...
+                                   * including doing the vm_pageout_throttle_up
+                                   */
+                                  VM_PAGE_FREE(m);
+                          } else {
+                                  vm_page_lockspin_queues();
+
+                                  vm_pageout_queue_steal(m, TRUE);
+                                  vm_page_activate(m);
+                                  
+                                  vm_page_unlock_queues();
+
+                                  /*
+                                   *   And we are done with it.
+                                   */
+                                  PAGE_WAKEUP_DONE(m);
+                          }
+                          vm_object_paging_end(object);
+                          vm_object_unlock(object);
+
+                          vm_page_lockspin_queues();
+                          continue;
+                  }
+                  VM_PAGE_CHECK(m);
+                  vm_object_unlock(object);
+                  /*
+                   * we expect the paging_in_progress reference to have
+                   * already been taken on the object before it was added
+                   * to the appropriate pageout I/O queue... this will
+                   * keep the object from being terminated and/or the 
+                   * paging_offset from changing until the I/O has 
+                   * completed... therefore no need to lock the object to
+                   * pull the paging_offset from it.
+                   *
+                   * Send the data to the pager.
+                   * any pageout clustering happens there
+                   */
+                  memory_object_data_return(pager,
+                                            m->offset + object->paging_offset,
+                                            PAGE_SIZE,
+                                            NULL,
+                                            NULL,
+                                            FALSE,
+                                            FALSE,
+                                            0);
+
+                  vm_object_lock(object);
+                  vm_object_paging_end(object);
+                  vm_object_unlock(object);
+
+                  vm_page_lockspin_queues();
+       }
+       assert_wait((event_t) q, THREAD_UNINT);
+
+       if (q->pgo_throttled == TRUE && !VM_PAGE_Q_THROTTLED(q)) {
+               q->pgo_throttled = FALSE;
+               thread_wakeup((event_t) &q->pgo_laundry);
+       }
+       if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
+               q->pgo_draining = FALSE;
+               thread_wakeup((event_t) (&q->pgo_laundry+1));
+       }
+       q->pgo_busy = FALSE;
+       q->pgo_idle = TRUE;
+       vm_page_unlock_queues();
+
+       thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) &q->pgo_pending);
+       /*NOTREACHED*/
+}
+
+
+static void
+vm_pageout_iothread_external(void)
+{
+       thread_t        self = current_thread();
+
+       self->options |= TH_OPT_VMPRIV;
+
+       vm_pageout_iothread_continue(&vm_pageout_queue_external);
+       /*NOTREACHED*/
+}
+
+
+static void
+vm_pageout_iothread_internal(void)
+{
+       thread_t        self = current_thread();
+
+       self->options |= TH_OPT_VMPRIV;
+
+       vm_pageout_iothread_continue(&vm_pageout_queue_internal);
+       /*NOTREACHED*/
+}
+
+kern_return_t
+vm_set_buffer_cleanup_callout(boolean_t (*func)(int)) 
+{
+       if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
+               return KERN_SUCCESS;
+       } else {
+               return KERN_FAILURE; /* Already set */
+       }
+}
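+/*
+ * OSCompareAndSwapPtr(old, new, addr) installs 'new' only if *addr
+ * still equals 'old', so the callout can be registered exactly once.
+ * A caller's usage might look like this (my_collect is a hypothetical
+ * boolean_t (*)(int)):
+ *
+ *	if (vm_set_buffer_cleanup_callout(my_collect) != KERN_SUCCESS)
+ *		printf("buffer cleanup callout already registered\n");
+ */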
+
+static void
+vm_pageout_garbage_collect(int collect)
+{
+       if (collect) {
+               boolean_t buf_large_zfree = FALSE;
+               stack_collect();
+
+               /*
+                * consider_zone_gc should be last, because the other operations
+                * might return memory to zones.
+                */
+               consider_machine_collect();
+               if (consider_buffer_cache_collect != NULL) {
+                       buf_large_zfree = (*consider_buffer_cache_collect)(0);
+               }
+               consider_zone_gc(buf_large_zfree);
+
+               consider_machine_adjust();
+       }
+
+       assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
+
+       thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
+       /*NOTREACHED*/
+}
+
+
 
 void
 vm_pageout(void)
 {
        thread_t        self = current_thread();
+       thread_t        thread;
+       kern_return_t   result;
+       spl_t           s;
 
        /*
         * Set thread privileges.
         */
-       self->vm_privilege = TRUE;
-       stack_privilege(self);
-       thread_swappable(current_act(), FALSE);
+       s = splsched();
+       thread_lock(self);
+       self->priority = BASEPRI_PREEMPT - 1;
+       set_sched_pri(self, self->priority);
+       thread_unlock(self);
+
+       if (!self->reserved_stack)
+               self->reserved_stack = self->kernel_stack;
+
+       splx(s);
 
        /*
         *      Initialize some paging parameters.
         */
 
-       if (vm_page_laundry_max == 0)
-               vm_page_laundry_max = VM_PAGE_LAUNDRY_MAX;
-
-       if (vm_pageout_burst_max == 0)
-               vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX;
+       if (vm_pageout_idle_wait == 0)
+               vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
 
        if (vm_pageout_burst_wait == 0)
                vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
@@ -2060,7 +2604,31 @@ vm_pageout(void)
        if (vm_pageout_empty_wait == 0)
                vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
 
+       if (vm_pageout_deadlock_wait == 0)
+               vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
+
+       if (vm_pageout_deadlock_relief == 0)
+               vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
+
+       if (vm_pageout_inactive_relief == 0)
+               vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
+
+       if (vm_pageout_burst_active_throttle == 0)
+               vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
+
+       if (vm_pageout_burst_inactive_throttle == 0)
+               vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
+
+       /*
+        * Set kernel task to low backing store privileged 
+        * status
+        */
+       task_lock(kernel_task);
+       kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
+       task_unlock(kernel_task);
+
        vm_page_free_count_init = vm_page_free_count;
+
        /*
         * even if we've already called vm_page_free_reserve
         * call it again here to ensure that the targets are
@@ -2068,46 +2636,267 @@ vm_pageout(void)
         * calling it with an arg of 0 will not change the reserve
         * but will re-calculate free_min and free_target
         */
-       if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED)
-               vm_page_free_reserve(VM_PAGE_FREE_RESERVED - vm_page_free_reserved);
-       else
+       if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
+               vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
+       } else
                vm_page_free_reserve(0);
 
+
+       queue_init(&vm_pageout_queue_external.pgo_pending);
+       vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+       vm_pageout_queue_external.pgo_laundry = 0;
+       vm_pageout_queue_external.pgo_idle = FALSE;
+       vm_pageout_queue_external.pgo_busy = FALSE;
+       vm_pageout_queue_external.pgo_throttled = FALSE;
+       vm_pageout_queue_external.pgo_draining = FALSE;
+
+       queue_init(&vm_pageout_queue_internal.pgo_pending);
+       vm_pageout_queue_internal.pgo_maxlaundry = 0;
+       vm_pageout_queue_internal.pgo_laundry = 0;
+       vm_pageout_queue_internal.pgo_idle = FALSE;
+       vm_pageout_queue_internal.pgo_busy = FALSE;
+       vm_pageout_queue_internal.pgo_throttled = FALSE;
+       vm_pageout_queue_internal.pgo_draining = FALSE;
+
+
+       /* internal pageout thread started when default pager registered first time */
+       /* external pageout and garbage collection threads started here */
+
+       result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL, 
+                                             BASEPRI_PREEMPT - 1, 
+                                             &vm_pageout_external_iothread);
+       if (result != KERN_SUCCESS)
+               panic("vm_pageout_iothread_external: create failed");
+
+       thread_deallocate(vm_pageout_external_iothread);
+
+       result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
+                                             MINPRI_KERNEL, 
+                                             &thread);
+       if (result != KERN_SUCCESS)
+               panic("vm_pageout_garbage_collect: create failed");
+
+       thread_deallocate(thread);
+
+       vm_object_reaper_init();
+
+
+       vm_pageout_continue();
+
        /*
-        *      vm_pageout_scan will set vm_page_inactive_target.
+        * Unreached code!
         *
-        *      The pageout daemon is never done, so loop forever.
-        *      We should call vm_pageout_scan at least once each
-        *      time we are woken, even if vm_page_free_wanted is
-        *      zero, to check vm_page_free_target and
-        *      vm_page_inactive_target.
+        * The vm_pageout_continue() call above never returns, so the code below is never
+        * executed.  We take advantage of this to declare several DTrace VM related probe
+        * points that our kernel doesn't have an analog for.  These are probe points that
+        * exist in Solaris and are in the DTrace documentation, so people may have written
+        * scripts that use them.  Declaring the probe points here means their scripts will
+        * compile and execute, which we want for portability of the scripts, but since this
+        * section of code is never reached, the probe points will simply never fire.  Yes,
+        * this is basically a hack.  The problem is the DTrace probe points were chosen with
+        * Solaris specific VM events in mind, not portability to different VM implementations.
         */
-       for (;;) {
-               vm_pageout_scan();
-               /* we hold vm_page_queue_free_lock now */
-               assert(vm_page_free_wanted == 0);
-               assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
-               mutex_unlock(&vm_page_queue_free_lock);
-               counter(c_vm_pageout_block++);
-               thread_block((void (*)(void)) 0);
-       }
+
+       DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
+       DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
+       DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
+       DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
+       DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
+       DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
+       DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
        /*NOTREACHED*/
 }
 
+kern_return_t
+vm_pageout_internal_start(void)
+{
+       kern_return_t result;
+
+       vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+       result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
+       if (result == KERN_SUCCESS)
+               thread_deallocate(vm_pageout_internal_iothread);
+       return result;
+}
+
+
+/*
+ * when marshalling pages into a UPL and subsequently committing
+ * or aborting them, it is necessary to hold 
+ * the vm_page_queue_lock (a hot global lock) for certain operations
+ * on the page... however, the majority of the work can be done
+ * while merely holding the object lock... in fact there are certain
+ * collections of pages that don't require any work brokered by the
+ * vm_page_queue_lock... to mitigate the time spent behind the global
+ * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
+ * while doing all of the work that doesn't require the vm_page_queue_lock...
+ * then call dw_do_work to acquire the vm_page_queue_lock and do the
+ * necessary work for each page... we will grab the busy bit on the page
+ * if it's not already held so that dw_do_work can drop the object lock
+ * if it can't immediately take the vm_page_queue_lock in order to compete
+ * for the locks in the same order that vm_pageout_scan takes them.
+ * the operation names are modeled after the names of the routines that
+ * need to be called in order to make the changes very obvious in the
+ * original loop
+ */
+
+#define DELAYED_WORK_LIMIT     32
+
+#define DW_vm_page_unwire              0x01
+#define DW_vm_page_wire                        0x02
+#define DW_vm_page_free                        0x04
+#define DW_vm_page_activate            0x08
+#define DW_vm_page_deactivate_internal 0x10
+#define DW_vm_page_speculate           0x20
+#define DW_vm_page_lru                 0x40
+#define DW_vm_pageout_throttle_up      0x80
+#define DW_PAGE_WAKEUP                 0x100
+#define DW_clear_busy                  0x200
+#define DW_clear_reference             0x400
+#define DW_set_reference               0x800
+
+struct dw {
+       vm_page_t       dw_m;
+       int             dw_mask;
+};
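+/*
+ * A minimal sketch of the two-pass pattern described above
+ * (collection side only; dw_do_work() is the flush, declared below):
+ *
+ *	struct dw dw_array[DELAYED_WORK_LIMIT];
+ *	struct dw *dwp = &dw_array[0];
+ *	int dw_count = 0;
+ *
+ *	// pass 1: per-page work under the object lock only
+ *	dwp->dw_m = dst_page;
+ *	dwp->dw_mask = DW_set_reference | DW_PAGE_WAKEUP;
+ *	dwp++;
+ *	if (++dw_count >= DELAYED_WORK_LIMIT) {
+ *		// pass 2: one vm_page_queue_lock acquisition per batch
+ *		dw_do_work(object, &dw_array[0], dw_count);
+ *		dwp = &dw_array[0];
+ *		dw_count = 0;
+ *	}
+ */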
+
+
+static void dw_do_work(vm_object_t object, struct dw *dwp, int dw_count);
+
+
+
+static upl_t
+upl_create(int type, int flags, upl_size_t size)
+{
+       upl_t   upl;
+       int     page_field_size = 0;
+       int     upl_flags = 0;
+       int     upl_size  = sizeof(struct upl);
+
+       size = round_page_32(size);
+
+       if (type & UPL_CREATE_LITE) {
+               page_field_size = (atop(size) + 7) >> 3;
+               page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
+
+               upl_flags |= UPL_LITE;
+       }
+       if (type & UPL_CREATE_INTERNAL) {
+               upl_size += (int) sizeof(struct upl_page_info) * atop(size);
+
+               upl_flags |= UPL_INTERNAL;
+       }
+       upl = (upl_t)kalloc(upl_size + page_field_size);
+
+       if (page_field_size)
+               bzero((char *)upl + upl_size, page_field_size);
+
+       upl->flags = upl_flags | flags;
+       upl->src_object = NULL;
+       upl->kaddr = (vm_offset_t)0;
+       upl->size = 0;
+       upl->map_object = NULL;
+       upl->ref_count = 1;
+       upl->highest_page = 0;
+       upl_lock_init(upl);
+       upl->vector_upl = NULL;
+#if UPL_DEBUG
+       upl->ubc_alias1 = 0;
+       upl->ubc_alias2 = 0;
+
+       upl->upl_creator = current_thread();
+       upl->upl_state = 0;
+       upl->upl_commit_index = 0;
+       bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
+
+       (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+#endif /* UPL_DEBUG */
+
+       return(upl);
+}
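+/*
+ * Sizing arithmetic above, worked through: for a 1 MB UPL_CREATE_LITE
+ * request (256 4K pages), page_field_size = ((256 + 7) >> 3) = 32
+ * bytes of bitmap, already 4-byte aligned, zeroed and appended after
+ * the upl proper (and after the upl_page_info array when
+ * UPL_CREATE_INTERNAL is also set).
+ */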
+
+static void
+upl_destroy(upl_t upl)
+{
+       int     page_field_size;  /* bit field in word size buf */
+        int    size;
+
+#if UPL_DEBUG
+       {
+               vm_object_t     object;
+
+               if (upl->flags & UPL_SHADOWED) {
+                       object = upl->map_object->shadow;
+               } else {
+                       object = upl->map_object;
+               }
+               vm_object_lock(object);
+               queue_remove(&object->uplq, upl, upl_t, uplq);
+               vm_object_unlock(object);
+       }
+#endif /* UPL_DEBUG */
+       /*
+        * drop a reference on the map_object whether or
+        * not a pageout object is inserted
+        */
+       if (upl->flags & UPL_SHADOWED)
+               vm_object_deallocate(upl->map_object);
+
+        if (upl->flags & UPL_DEVICE_MEMORY)
+               size = PAGE_SIZE;
+       else
+               size = upl->size;
+       page_field_size = 0;
+
+       if (upl->flags & UPL_LITE) {
+               page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+               page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
+       }
+       upl_lock_destroy(upl);
+       upl->vector_upl = (vector_upl_t) 0xfeedbeef;
+       if (upl->flags & UPL_INTERNAL) {
+               kfree(upl,
+                     sizeof(struct upl) + 
+                     (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
+                     + page_field_size);
+       } else {
+               kfree(upl, sizeof(struct upl) + page_field_size);
+       }
+}
+
+void uc_upl_dealloc(upl_t upl);
+__private_extern__ void
+uc_upl_dealloc(upl_t upl)
+{
+       if (--upl->ref_count == 0)
+               upl_destroy(upl);
+}
 
 void
-upl_dealloc(
-       upl_t   upl)
+upl_deallocate(upl_t upl)
 {
-       upl->ref_count -= 1;
-       if(upl->ref_count == 0) {
+       if (--upl->ref_count == 0) {
+               if(vector_upl_is_valid(upl))
+                       vector_upl_deallocate(upl);
                upl_destroy(upl);
        }
 }
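+
+/*
+ * Reference-count lifecycle, sketched (illustrative; the creator /
+ * consumer split is hypothetical).  upl_create() hands back the first
+ * reference; each additional holder bumps ref_count, every holder drops
+ * through upl_deallocate(), and the last drop runs upl_destroy():
+ *
+ *	upl_t upl = upl_create(UPL_CREATE_INTERNAL, 0, size);	// ref 1
+ *	upl->ref_count++;		// hand one to a consumer   ref 2
+ *	...
+ *	upl_deallocate(upl);		// consumer done            ref 1
+ *	upl_deallocate(upl);		// creator done, destroyed
+ */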
 
+#if DEVELOPMENT || DEBUG
+/*
+ * Statistics about UPL enforcement of copy-on-write obligations.
+ */
+unsigned long upl_cow = 0;
+unsigned long upl_cow_again = 0;
+unsigned long upl_cow_pages = 0;
+unsigned long upl_cow_again_pages = 0;
+
+unsigned long iopl_cow = 0;
+unsigned long iopl_cow_pages = 0;
+#endif
 
 /*  
- *     Routine:        vm_fault_list_request
+ *     Routine:        vm_object_upl_request 
  *     Purpose:        
  *             Cause the population of a portion of a vm_object.
  *             Depending on the nature of the request, the pages
@@ -2149,1187 +2938,4456 @@ upl_dealloc(
  *             the vm_objects (cache objects), they support.
  *
  */
-kern_return_t
-vm_fault_list_request(
+
+__private_extern__ kern_return_t
+vm_object_upl_request(
        vm_object_t             object,
        vm_object_offset_t      offset,
-       vm_size_t               size,
+       upl_size_t              size,
        upl_t                   *upl_ptr,
-       upl_page_info_t         **user_page_list_ptr,
-       int                     page_list_count,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            *page_list_count,
        int                     cntrl_flags)
 {
-       vm_page_t               dst_page;
-       vm_object_offset_t      dst_offset = offset;
-       upl_page_info_t         *user_page_list;
-       vm_size_t               xfer_size = size;
-       boolean_t               do_m_lock = FALSE;
+       vm_page_t               dst_page = VM_PAGE_NULL;
+       vm_object_offset_t      dst_offset;
+       upl_size_t              xfer_size;
        boolean_t               dirty;
+       boolean_t               hw_dirty;
        upl_t                   upl = NULL;
-       int                     entry;
+       unsigned int            entry;
+#if MACH_CLUSTER_STATS
        boolean_t               encountered_lrp = FALSE;
-
+#endif
        vm_page_t               alias_page = NULL;
+        int                    refmod_state = 0;
+       wpl_array_t             lite_list = NULL;
+       vm_object_t             last_copy_object;
+       struct  dw              dw_array[DELAYED_WORK_LIMIT];
+       struct  dw              *dwp;
+       int                     dw_count;
+
+       if (cntrl_flags & ~UPL_VALID_FLAGS) {
+               /*
+                * For forward compatibility's sake,
+                * reject any unknown flag.
+                */
+               return KERN_INVALID_VALUE;
+       }
+       if ( (!object->internal) && (object->paging_offset != 0) )
+               panic("vm_object_upl_request: external object with non-zero paging offset\n");
+       if (object->phys_contiguous)
+               panic("vm_object_upl_request: contiguous object specified\n");
 
-       if(cntrl_flags & UPL_SET_INTERNAL)
-               page_list_count = MAX_UPL_TRANSFER;
-       if(((user_page_list_ptr || (cntrl_flags & UPL_SET_INTERNAL)) && 
-               !(object->private)) && (page_list_count < (size/page_size)))
-               return KERN_INVALID_ARGUMENT;
 
-       if((!object->internal) && (object->paging_offset != 0))
-               panic("vm_fault_list_request: vnode object with non-zero paging offset\n");
+       if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
+               size = MAX_UPL_SIZE * PAGE_SIZE;
 
-       if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) {
-               return KERN_SUCCESS;
-       }
-       if(upl_ptr) {
-               if((cntrl_flags & UPL_SET_INTERNAL) && !(object->private)) {
-                       upl = upl_create(TRUE);
-                       user_page_list = (upl_page_info_t *)
-                               (((vm_offset_t)upl) + sizeof(struct upl));
-                       if(user_page_list_ptr)
-                               *user_page_list_ptr = user_page_list;
-                       upl->flags |= UPL_INTERNAL;
+       if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
+               *page_list_count = MAX_UPL_SIZE;
+
+       if (cntrl_flags & UPL_SET_INTERNAL) {
+               if (cntrl_flags & UPL_SET_LITE) {
+
+                       upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
+
+                       user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+                       lite_list = (wpl_array_t)
+                                       (((uintptr_t)user_page_list) + 
+                                       ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+                       if (size == 0) {
+                               user_page_list = NULL;
+                               lite_list = NULL;
+                       }
                } else {
-                       upl = upl_create(FALSE);
-                       if(user_page_list_ptr)
-                               user_page_list = *user_page_list_ptr;
-                       else
+                       upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
+
+                       user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+                       if (size == 0) {
                                user_page_list = NULL;
-                       if(object->private) {
-                               upl->size = size;
-                               upl->offset = offset;
-                               *upl_ptr = upl;
-                               if(user_page_list) {
-                                       user_page_list[0].phys_addr = offset;
-                                       user_page_list[0].device = TRUE;
-                               }
-                               upl->flags = UPL_DEVICE_MEMORY;
-                               return KERN_SUCCESS;
                        }
-               
-                       
                }
-               upl->map_object = vm_object_allocate(size);
-               vm_object_lock(upl->map_object);
+       } else {
+               if (cntrl_flags & UPL_SET_LITE) {
+
+                       upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
+
+                       lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+                       if (size == 0) {
+                               lite_list = NULL;
+                       }
+               } else {
+                       upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
+               }
+       }
+       *upl_ptr = upl;
+       
+       if (user_page_list)
+               user_page_list[0].device = FALSE;
+
+       if (cntrl_flags & UPL_SET_LITE) {
+               upl->map_object = object;
+       } else {
+               upl->map_object = vm_object_allocate(size);
+               /*
+                * No need to lock the new object: nobody else knows
+                * about it yet, so it's all ours so far.
+                */
                upl->map_object->shadow = object;
-               upl->size = size;
-               upl->offset = offset + object->paging_offset;
                upl->map_object->pageout = TRUE;
                upl->map_object->can_persist = FALSE;
                upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
                upl->map_object->shadow_offset = offset;
-               vm_object_unlock(upl->map_object);
-               *upl_ptr = upl;
-       }
-       VM_PAGE_GRAB_FICTITIOUS(alias_page);
+               upl->map_object->wimg_bits = object->wimg_bits;
+
+               VM_PAGE_GRAB_FICTITIOUS(alias_page);
+
+               upl->flags |= UPL_SHADOWED;
+       }
+       /*
+        * ENCRYPTED SWAP:
+        * Just mark the UPL as "encrypted" here.
+        * We'll actually encrypt the pages later,
+        * in upl_encrypt(), when the caller has
+        * selected which pages need to go to swap.
+        */
+       if (cntrl_flags & UPL_ENCRYPT)
+               upl->flags |= UPL_ENCRYPTED;
+
+       if (cntrl_flags & UPL_FOR_PAGEOUT)
+               upl->flags |= UPL_PAGEOUT;
+
        vm_object_lock(object);
-#ifdef UBC_DEBUG
-       if(upl_ptr)
-               queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UBC_DEBUG */
-       vm_object_paging_begin(object);
+       vm_object_activity_begin(object);
+
+       /*
+        * we can lock in the paging_offset once paging_in_progress is set
+        */
+       upl->size = size;
+       upl->offset = offset + object->paging_offset;
+
+#if UPL_DEBUG
+       queue_enter(&object->uplq, upl, upl_t, uplq);
+#endif /* UPL_DEBUG */
+
+       if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
+               /*
+                * Honor copy-on-write obligations
+                *
+                * The caller is gathering these pages and
+                * might modify their contents.  We need to
+                * make sure that the copy object has its own
+                * private copies of these pages before we let
+                * the caller modify them.
+                */
+               vm_object_update(object,
+                                offset,
+                                size,
+                                NULL,
+                                NULL,
+                                FALSE, /* should_return */
+                                MEMORY_OBJECT_COPY_SYNC,
+                                VM_PROT_NO_CHANGE);
+#if DEVELOPMENT || DEBUG
+               upl_cow++;
+               upl_cow_pages += size >> PAGE_SHIFT;
+#endif
+       }
+       /*
+        * remember which copy object we synchronized with
+        */
+       last_copy_object = object->copy;
        entry = 0;
-       if(cntrl_flags & UPL_COPYOUT_FROM) {
-               upl->flags |= UPL_PAGE_SYNC_DONE;
-               while (xfer_size) {
-                       if(alias_page == NULL) {
-                               vm_object_unlock(object);
-                               VM_PAGE_GRAB_FICTITIOUS(alias_page);
-                               vm_object_lock(object);
-                       }
-                       if(((dst_page = vm_page_lookup(object, 
-                               dst_offset)) == VM_PAGE_NULL) ||
+
+       xfer_size = size;
+       dst_offset = offset;
+
+       dwp = &dw_array[0];
+       dw_count = 0;
+
+       while (xfer_size) {
+
+               dwp->dw_mask = 0;
+
+               if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
+                       vm_object_unlock(object);
+                       VM_PAGE_GRAB_FICTITIOUS(alias_page);
+                       vm_object_lock(object);
+               }
+               if (cntrl_flags & UPL_COPYOUT_FROM) {
+                       upl->flags |= UPL_PAGE_SYNC_DONE;
+
+                       if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
                                dst_page->fictitious ||
                                dst_page->absent ||
                                dst_page->error ||
-                               (dst_page->wire_count != 0 && 
-                                                       !dst_page->pageout) ||
-                               ((!(dst_page->dirty || dst_page->precious ||
-                                     pmap_is_modified(dst_page->phys_addr)))
-                                     && (cntrl_flags & UPL_RET_ONLY_DIRTY))) {
-                               if(user_page_list)
+                              (VM_PAGE_WIRED(dst_page) && !dst_page->pageout && !dst_page->list_req_pending)) {
+
+                               if (user_page_list)
                                        user_page_list[entry].phys_addr = 0;
-                       } else {
-                               
-                               if(dst_page->busy && 
-                                       (!(dst_page->list_req_pending && 
-                                               dst_page->pageout))) {
-                                       if(cntrl_flags & UPL_NOBLOCK) {
-                                               if(user_page_list)
-                                                       user_page_list[entry]
-                                                               .phys_addr = 0;
-                                               entry++;
-                                               dst_offset += PAGE_SIZE_64;
-                                               xfer_size -= PAGE_SIZE;
-                                               continue;
-                                       }
-                                       /*someone else is playing with the */
-                                       /* page.  We will have to wait.    */
-                                       PAGE_ASSERT_WAIT(
-                                               dst_page, THREAD_UNINT);
-                                       vm_object_unlock(object);
-                                       thread_block((void(*)(void))0);
-                                       vm_object_lock(object);
-                                       continue;
+
+                               goto try_next_page;
+                       }
+                       /*
+                        * grab this up front...
+                        * a high percentage of the time we're going to
+                        * need the hardware modification state a bit later
+                        * anyway... so we can eliminate an extra call into
+                        * the pmap layer by grabbing it here and recording it
+                        */
+                       if (dst_page->pmapped)
+                               refmod_state = pmap_get_refmod(dst_page->phys_page);
+                       else
+                               refmod_state = 0;
+
+                       if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
+                               /*
+                                * page is on inactive list and referenced...
+                                * reactivate it now... this gets it out of the
+                                * way of vm_pageout_scan which would have to
+                                * reactivate it upon tripping over it
+                                */
+                               dwp->dw_mask |= DW_vm_page_activate;
+                       }
+                       if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
+                               /*
+                                * we're only asking for DIRTY pages to be returned
+                                */
+                               if (dst_page->list_req_pending || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
+                                       /*
+                                        * if this is the page vm_pageout_scan stole to be
+                                        * cleaned (as opposed to a buddy being clustered in),
+                                        * or this request is not being driven by a PAGEOUT
+                                        * cluster, then we only need to check for the page
+                                        * being dirty or precious to decide whether to return it
+                                        */
+                                       if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
+                                               goto check_busy;
+                                       goto dont_return;
                                }
-                               /* Someone else already cleaning the page? */
-                               if((dst_page->cleaning || dst_page->absent ||
-                                       dst_page->prep_pin_count != 0 ||
-                                       dst_page->wire_count != 0) && 
-                                       !dst_page->list_req_pending) {
-                                  if(user_page_list)
-                                          user_page_list[entry].phys_addr = 0;
-                                  entry++;
-                                  dst_offset += PAGE_SIZE_64;
-                                  xfer_size -= PAGE_SIZE;
-                                  continue;
+                               /*
+                                * this is a request for a PAGEOUT cluster and this page
+                                * is merely along for the ride as a 'buddy'... not only
+                                * does it have to be dirty to be returned, but it also
+                                * can't have been referenced recently... note that we've
+                                * already filtered above based on whether this page is
+                                * currently on the inactive queue or it meets the page
+                                * ticket (generation count) check
+                                */
+                               if ( (cntrl_flags & UPL_CLEAN_IN_PLACE || !(refmod_state & VM_MEM_REFERENCED)) && 
+                                    ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
+                                       goto check_busy;
                                }
-                               /* eliminate all mappings from the */
-                               /* original object and its prodigy */
-                               
-                               vm_page_lock_queues();
-                               pmap_page_protect(dst_page->phys_addr, 
-                                                               VM_PROT_NONE);
-
-                               /* pageout statistics gathering.  count  */
-                               /* all the pages we will page out that   */
-                               /* were not counted in the initial       */
-                               /* vm_pageout_scan work                  */
-                               if(dst_page->list_req_pending)
-                                       encountered_lrp = TRUE;
-                               if((dst_page->dirty ||
-                                       (dst_page->object->internal &&
-                                       dst_page->precious)) &&
-                                       (dst_page->list_req_pending 
-                                       == FALSE)) {
-                                       if(encountered_lrp) {
-                                               CLUSTER_STAT
-                                               (pages_at_higher_offsets++;)
-                                       } else {
-                                               CLUSTER_STAT
-                                               (pages_at_lower_offsets++;)
-                                       }
+dont_return:
+                               /*
+                                * if we reach here, we're not to return
+                                * the page... go on to the next one
+                                */
+                               if (user_page_list)
+                                       user_page_list[entry].phys_addr = 0;
+
+                               goto try_next_page;
+                       }
+check_busy:                    
+                       if (dst_page->busy && (!(dst_page->list_req_pending && (dst_page->pageout || dst_page->cleaning)))) {
+                               if (cntrl_flags & UPL_NOBLOCK) {
+                                       if (user_page_list)
+                                               user_page_list[entry].phys_addr = 0;
+
+                                       goto try_next_page;
                                }
+                               /*
+                                * someone else is playing with the
+                                * page.  We will have to wait.
+                                */
+                               PAGE_SLEEP(object, dst_page, THREAD_UNINT);
 
-                               /* Turn off busy indication on pending */
-                               /* pageout.  Note: we can only get here */
-                               /* in the request pending case.  */
-                               dst_page->list_req_pending = FALSE;
-                               dst_page->busy = FALSE;
-                               dst_page->cleaning = FALSE;
-
-                               dirty = pmap_is_modified(dst_page->phys_addr);
-                               dirty = dirty ? TRUE : dst_page->dirty;
-
-                               /* use pageclean setup, it is more convenient */
-                               /* even for the pageout cases here */
-                               vm_pageclean_setup(dst_page, alias_page,
-                                       upl->map_object, size - xfer_size);
-                                               
-                               if(!dirty) {
-                                       dst_page->dirty = FALSE;
-                                       dst_page->precious = TRUE;
+                               continue;
+                       }
+                       /*
+                        * Someone else already cleaning the page?
+                        */
+                       if ((dst_page->cleaning || dst_page->absent || VM_PAGE_WIRED(dst_page)) && !dst_page->list_req_pending) {
+                               if (user_page_list)
+                                       user_page_list[entry].phys_addr = 0;
+
+                               goto try_next_page;
+                       }
+                       /*
+                        * ENCRYPTED SWAP:
+                        * The caller is gathering this page and might
+                        * access its contents later on.  Decrypt the
+                        * page before adding it to the UPL, so that
+                        * the caller never sees encrypted data.
+                        */
+                       if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
+                               int  was_busy;
+
+                               /*
+                                * save the current state of busy
+                                * mark page as busy while decrypt
+                                * is in progress since it will drop
+                                * the object lock...
+                                */
+                               was_busy = dst_page->busy;
+                               dst_page->busy = TRUE;
+
+                               vm_page_decrypt(dst_page, 0);
+                               vm_page_decrypt_for_upl_counter++;
+                               /*
+                                * restore to original busy state
+                                */
+                               dst_page->busy = was_busy;
+                       }
+                       if (dst_page->pageout_queue == TRUE) {
+
+                               vm_page_lockspin_queues();
+
+#if CONFIG_EMBEDDED
+                               if (dst_page->laundry)
+#else
+                               if (dst_page->pageout_queue == TRUE)
+#endif
+                               {
+                                       /*
+                                        * we've buddied up a page for a clustered pageout
+                                        * that has already been moved to the pageout
+                                        * queue by pageout_scan... we need to remove
+                                        * it from the queue and drop the laundry count
+                                        * on that queue
+                                        */
+                                       vm_pageout_throttle_up(dst_page);
                                }
+                               vm_page_unlock_queues();
+                       }
+#if MACH_CLUSTER_STATS
+                       /*
+                        * pageout statistics gathering.  count
+                        * all the pages we will page out that
+                        * were not counted in the initial
+                        * vm_pageout_scan work
+                        */
+                       if (dst_page->list_req_pending)
+                               encountered_lrp = TRUE;
+                       if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious)) && !dst_page->list_req_pending) {
+                               if (encountered_lrp)
+                                       CLUSTER_STAT(pages_at_higher_offsets++;)
+                               else
+                                       CLUSTER_STAT(pages_at_lower_offsets++;)
+                       }
+#endif
+                       /*
+                        * Turn off busy indication on pending
+                        * pageout.  Note: we can only get here
+                        * in the request pending case.
+                        */
+                       dst_page->list_req_pending = FALSE;
+                       dst_page->busy = FALSE;
+
+                       hw_dirty = refmod_state & VM_MEM_MODIFIED;
+                       dirty = hw_dirty ? TRUE : dst_page->dirty;
 
-                               if(dst_page->pageout)
-                                       dst_page->busy = TRUE;
+                       if (dst_page->phys_page > upl->highest_page)
+                               upl->highest_page = dst_page->phys_page;
+
+                       if (cntrl_flags & UPL_SET_LITE) {
+                               unsigned int    pg_num;
+
+                               pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+                               assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+                               lite_list[pg_num>>5] |= 1 << (pg_num & 31);
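+                               /*
+                                * the lite list is an array of 32-bit
+                                * words: word pg_num >> 5 holds this
+                                * page's bit, bit pg_num & 31 within it
+                                */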
+
+                               if (hw_dirty)
+                                       pmap_clear_modify(dst_page->phys_page);
+
+                               /*
+                                * Mark original page as cleaning 
+                                * in place.
+                                */
+                               dst_page->cleaning = TRUE;
+                               dst_page->precious = FALSE;
+                       } else {
+                               /*
+                                * use pageclean setup, it is more
+                                * convenient even for the pageout
+                                * cases here
+                                */
+                               vm_object_lock(upl->map_object);
+                               vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
+                               vm_object_unlock(upl->map_object);
 
                                alias_page->absent = FALSE;
                                alias_page = NULL;
-                               if(!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
-                                       /* deny access to the target page */
-                                       /* while it is being worked on    */
-                                       if((!dst_page->pageout) &&
-                                               (dst_page->wire_count == 0)) {
-                                               dst_page->busy = TRUE;
-                                               dst_page->pageout = TRUE;
-                                               vm_page_wire(dst_page);
-                                       }
-                               }
-                               if(user_page_list) {
-                                       user_page_list[entry].phys_addr
-                                               = dst_page->phys_addr;
-                                       user_page_list[entry].dirty =   
-                                                       dst_page->dirty;
-                                       user_page_list[entry].pageout =
-                                                       dst_page->pageout;
-                                       user_page_list[entry].absent =
-                                                       dst_page->absent;
-                                       user_page_list[entry].precious =
-                                                       dst_page->precious;
-                               }
+                       }
+#if     MACH_PAGEMAP
+                       /*
+                        * Record that this page has been 
+                        * written out
+                        */
+                       vm_external_state_set(object->existence_map, dst_page->offset);
+#endif  /*MACH_PAGEMAP*/
+                       dst_page->dirty = dirty;
+
+                       if (!dirty)
+                               dst_page->precious = TRUE;
+
+                       if (dst_page->pageout)
+                               dst_page->busy = TRUE;
+
+                       if ( (cntrl_flags & UPL_ENCRYPT) ) {
+                               /*
+                                * ENCRYPTED SWAP:
+                                * We want to deny access to the target page
+                                * because its contents are about to be
+                                * encrypted and the user would be very
+                                * confused to see encrypted data instead
+                                * of their data.
+                                * We also set "encrypted_cleaning" to allow
+                                * vm_pageout_scan() to demote that page
+                                * from "adjacent/clean-in-place" to
+                                * "target/clean-and-free" if it bumps into
+                                * this page during its scanning while we're
+                                * still processing this cluster.
+                                */
+                               dst_page->busy = TRUE;
+                               dst_page->encrypted_cleaning = TRUE;
+                       }
+                       if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
+                               /*
+                                * deny access to the target page
+                                * while it is being worked on
+                                */
+                               if ((!dst_page->pageout) && ( !VM_PAGE_WIRED(dst_page))) {
+                                       dst_page->busy = TRUE;
+                                       dst_page->pageout = TRUE;
 
-                               vm_page_unlock_queues();
+                                       dwp->dw_mask |= DW_vm_page_wire;
+                               }
                        }
-                       entry++;
-                       dst_offset += PAGE_SIZE_64;
-                       xfer_size -= PAGE_SIZE;
-               }
-       } else {
-               while (xfer_size) {
-                       if(alias_page == NULL) {
-                               vm_object_unlock(object);
-                               VM_PAGE_GRAB_FICTITIOUS(alias_page);
-                               vm_object_lock(object);
+               } else {
+                       if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
+                               /*
+                                * Honor copy-on-write obligations
+                                *
+                                * The copy object has changed since we
+                                * last synchronized for copy-on-write.
+                                * Another copy object might have been
+                                * inserted while we released the object's
+                                * lock.  Since someone could have seen the
+                                * original contents of the remaining pages
+                                * through that new object, we have to
+                                * synchronize with it again for the remaining
+                                * pages only.  The previous pages are "busy"
+                                * so they can not be seen through the new
+                                * mapping.  The new mapping will see our
+                                * upcoming changes for those previous pages,
+                                * but that's OK since they couldn't see what
+                                * was there before.  It's just a race anyway
+                                * and there's no guarantee of consistency or
+                                * atomicity.  We just don't want new mappings
+                                * to see both the *before* and *after* pages.
+                                */
+                               if (object->copy != VM_OBJECT_NULL) {
+                                       vm_object_update(
+                                               object,
+                                               dst_offset,/* current offset */
+                                               xfer_size, /* remaining size */
+                                               NULL,
+                                               NULL,
+                                               FALSE,     /* should_return */
+                                               MEMORY_OBJECT_COPY_SYNC,
+                                               VM_PROT_NO_CHANGE);
+
+#if DEVELOPMENT || DEBUG
+                                       upl_cow_again++;
+                                       upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
+#endif
+                               }
+                               /*
+                                * remember the copy object we synced with
+                                */
+                               last_copy_object = object->copy;
                        }
                        dst_page = vm_page_lookup(object, dst_offset);
-                       if(dst_page != VM_PAGE_NULL) {
-                          if((dst_page->cleaning) && 
-                                       !(dst_page->list_req_pending)) {
-                                  /*someone else is writing to the */
-                                  /* page.  We will have to wait.  */
-                                  PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
-                                  vm_object_unlock(object);
-                                  thread_block((void(*)(void))0);
-                                  vm_object_lock(object);
-                                  continue;
-                          }
-                          if ((dst_page->fictitious && 
-                                       dst_page->list_req_pending)) {
-                               /* dump the fictitious page */
-                               dst_page->list_req_pending = FALSE;
-                               dst_page->clustered = FALSE;
-                               vm_page_lock_queues();
-                               vm_page_free(dst_page);
-                               vm_page_unlock_queues();
-                          } else if ((dst_page->absent && 
-                                       dst_page->list_req_pending)) {
-                               /* the default_pager case */
-                               dst_page->list_req_pending = FALSE;
-                               dst_page->busy = FALSE;
-                               dst_page->clustered = FALSE;
-                          }
+                       
+                       if (dst_page != VM_PAGE_NULL) {
+
+                               if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+
+                                       if ( !(dst_page->absent && dst_page->list_req_pending) ) {
+                                               /*
+                                                * skip over pages already present in the cache
+                                                */
+                                               if (user_page_list)
+                                                       user_page_list[entry].phys_addr = 0;
+
+                                               goto try_next_page;
+                                       }
+                               }
+                               if ( !(dst_page->list_req_pending) ) {
+
+                                       if (dst_page->cleaning) {
+                                               /*
+                                                * someone else is writing to the page... wait...
+                                                */
+                                               PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+
+                                               continue;
+                                       }
+                               } else {
+                                       if (dst_page->fictitious &&
+                                           dst_page->phys_page == vm_page_fictitious_addr) {
+                                               assert( !dst_page->speculative);
+                                               /*
+                                                * dump the fictitious page
+                                                */
+                                               dst_page->list_req_pending = FALSE;
+
+                                               VM_PAGE_FREE(dst_page);
+
+                                               dst_page = NULL;
+
+                                       } else if (dst_page->absent) {
+                                               /*
+                                                * the default_pager case
+                                                */
+                                               dst_page->list_req_pending = FALSE;
+                                               dst_page->busy = FALSE;
+
+                                       } else if (dst_page->pageout || dst_page->cleaning) {
+                                               /*
+                                                * page was earmarked by vm_pageout_scan
+                                                * to be cleaned and stolen... we're going
+                                                * to take it back since we are not attempting
+                                                * to read that page and we don't want to stall
+                                                * waiting for it to be cleaned for 2 reasons...
+                                                * 1 - no use paging it out and back in
+                                                * 2 - if we stall, we may cause a deadlock in
+                                                *     the FS trying to acquire its locks
+                                                *     on the VNOP_PAGEOUT path presuming that
+                                                *     those locks are already held on the read
+                                                *     path before trying to create this UPL
+                                                *
+                                                * so undo all of the state that vm_pageout_scan
+                                                * hung on this page
+                                                */
+                                               dst_page->busy = FALSE;
+
+                                               vm_pageout_queue_steal(dst_page, FALSE);
+                                       }
+                               }
                        }
-                       if((dst_page = vm_page_lookup(
-                               object, dst_offset)) == VM_PAGE_NULL) {
-                               /* need to allocate a page */
-                               dst_page = vm_page_alloc(object, dst_offset);
+                       if (dst_page == VM_PAGE_NULL) {
+                               if (object->private) {
+                                       /*
+                                        * This is a nasty wrinkle for users
+                                        * of upl who encounter device or
+                                        * private memory.  However, it is
+                                        * unavoidable: only a fault can
+                                        * resolve the actual backing
+                                        * physical page, by asking the
+                                        * backing device.
+                                        */
+                                       if (user_page_list)
+                                               user_page_list[entry].phys_addr = 0;
+
+                                       goto try_next_page;
+                               }
+                               /*
+                                * need to allocate a page
+                                */
+                               dst_page = vm_page_grab();
+
                                if (dst_page == VM_PAGE_NULL) {
-                                  vm_object_unlock(object);
-                                  VM_PAGE_WAIT();
-                                  vm_object_lock(object);
-                                  continue;
+                                       if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
+                                              /*
+                                               * we don't want to stall waiting for pages to come onto the free list
+                                               * while we're already holding absent pages in this UPL
+                                               * the caller will deal with the empty slots
+                                               */
+                                               if (user_page_list)
+                                                       user_page_list[entry].phys_addr = 0;
+
+                                               goto try_next_page;
+                                       }
+                                       /*
+                                        * no pages available... wait
+                                        * then try again for the same
+                                        * offset...
+                                        */
+                                       vm_object_unlock(object);
+                                       VM_PAGE_WAIT();
+                                       vm_object_lock(object);
+
+                                       continue;
                                }
+                               vm_page_insert(dst_page, object, dst_offset);
+
+                               dst_page->absent = TRUE;
                                dst_page->busy = FALSE;
-#if 0
-                               if(cntrl_flags & UPL_NO_SYNC) {
-                                       dst_page->page_lock = 0;
-                                       dst_page->unlock_request = 0;
+
+                               if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
+                                       /*
+                                        * if UPL_RET_ONLY_ABSENT was specified,
+                                        * then we're definitely setting up a
+                                        * UPL for a clustered read/pagein
+                                        * operation... mark the pages as clustered
+                                        * so upl_commit_range can put them on the
+                                        * speculative list
+                                        */
+                                       dst_page->clustered = TRUE;
                                }
-#endif
-                               dst_page->absent = TRUE;
-                               object->absent_count++;
                        }
-#if 1
-                       if(cntrl_flags & UPL_NO_SYNC) {
-                               dst_page->page_lock = 0;
-                               dst_page->unlock_request = 0;
+                       if (dst_page->fictitious) {
+                               panic("need corner case for fictitious page");
+                       }
+                       if (dst_page->busy) {
+                               /*
+                                * someone else is playing with the
+                                * page.  We will have to wait.
+                                */
+                               PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+
+                               continue;
+                       }
+                       /*
+                        * ENCRYPTED SWAP:
+                        */
+                       if (cntrl_flags & UPL_ENCRYPT) {
+                               /*
+                                * The page is going to be encrypted when we
+                                * get it from the pager, so mark it so.
+                                */
+                               dst_page->encrypted = TRUE;
+                       } else {
+                               /*
+                                * Otherwise, the page will not contain
+                                * encrypted data.
+                                */
+                               dst_page->encrypted = FALSE;
                        }
-#endif /* 1 */
                        dst_page->overwriting = TRUE;
-                       if(dst_page->fictitious) {
-                               panic("need corner case for fictitious page");
+
+                       if (dst_page->pmapped) {
+                               if ( !(cntrl_flags & UPL_FILE_IO))
+                                       /*
+                                        * eliminate all mappings from the
+                                        * original object and its progeny
+                                        */
+                                       refmod_state = pmap_disconnect(dst_page->phys_page);
+                               else
+                                       refmod_state = pmap_get_refmod(dst_page->phys_page);
+                       } else
+                               refmod_state = 0;
+
+                       hw_dirty = refmod_state & VM_MEM_MODIFIED;
+                       dirty = hw_dirty ? TRUE : dst_page->dirty;
+
+                       if (cntrl_flags & UPL_SET_LITE) {
+                               unsigned int    pg_num;
+
+                               pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+                               assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+                               lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+
+                               if (hw_dirty)
+                                       pmap_clear_modify(dst_page->phys_page);
+
+                               /*
+                                * Mark original page as cleaning 
+                                * in place.
+                                */
+                               dst_page->cleaning = TRUE;
+                               dst_page->precious = FALSE;
+                       } else {
+                               /*
+                                * use pageclean setup, it is more
+                                * convenient even for the pageout
+                                * cases here
+                                */
+                               vm_object_lock(upl->map_object);
+                               vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
+                               vm_object_unlock(upl->map_object);
+
+                               alias_page->absent = FALSE;
+                               alias_page = NULL;
+                       }
+
+                       if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
+                               /*
+                                * clean in place for read implies
+                                * that a write will be done on all
+                                * the pages that are dirty before
+                                * a upl commit is done.  The caller
+                                * a UPL commit is done.  The caller
+                                * contents of all pages marked dirty
+                                */
+                               upl->flags |= UPL_CLEAR_DIRTY;
+                       }
+                       dst_page->dirty = dirty;
+
+                       if (!dirty)
+                               dst_page->precious = TRUE;
+
+                       if ( !VM_PAGE_WIRED(dst_page)) {
+                               /*
+                                * deny access to the target page while
+                                * it is being worked on
+                                */
+                               dst_page->busy = TRUE;
+                       } else
+                               dwp->dw_mask |= DW_vm_page_wire;
+
+                       /*
+                        * We might be about to satisfy a fault which has been
+                        * requested. So no need for the "restart" bit.
+                        */
+                       dst_page->restart = FALSE;
+                       if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
+                               /*
+                                * expect the page to be used
+                                */
+                               dwp->dw_mask |= DW_set_reference;
+                       }
+                       dst_page->precious = (cntrl_flags & UPL_PRECIOUS) ? TRUE : FALSE;
+               }
+               if (dst_page->busy)
+                       upl->flags |= UPL_HAS_BUSY;
+
+               if (dst_page->phys_page > upl->highest_page)
+                       upl->highest_page = dst_page->phys_page;
+               if (user_page_list) {
+                       user_page_list[entry].phys_addr = dst_page->phys_page;
+                       user_page_list[entry].pageout   = dst_page->pageout;
+                       user_page_list[entry].absent    = dst_page->absent;
+                       user_page_list[entry].dirty     = dst_page->dirty;
+                       user_page_list[entry].precious  = dst_page->precious;
+                       user_page_list[entry].device    = FALSE;
+                       if (dst_page->clustered == TRUE)
+                               user_page_list[entry].speculative = dst_page->speculative;
+                       else
+                               user_page_list[entry].speculative = FALSE;
+                       user_page_list[entry].cs_validated = dst_page->cs_validated;
+                       user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+               }
+               /*
+                * if UPL_RET_ONLY_ABSENT is set, then
+                * we are working with a fresh page and we've
+                * just set the clustered flag on it to
+                * indicate that it was dragged in as part of a
+                * speculative cluster... so leave it alone
+                */
+               if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+                       /*
+                        * someone is explicitly grabbing this page...
+                        * update clustered and speculative state
+                        * 
+                        */
+                       VM_PAGE_CONSUME_CLUSTERED(dst_page);
+               }
+try_next_page:
+               if (dwp->dw_mask) {
+                       if (dwp->dw_mask & DW_vm_page_activate)
+                               VM_STAT_INCR(reactivations);
+
+                       if (dst_page->busy == FALSE) {
+                               /*
+                                * dw_do_work may need to drop the object lock
+                                * if it does, we need the pages it's looking at to
+                                * be held stable via the busy bit.
+                                */
+                               dst_page->busy = TRUE;
+                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
                        }
-                       if(dst_page->page_lock) {
-                               do_m_lock = TRUE;
+                       dwp->dw_m = dst_page;
+                       dwp++;
+                       dw_count++;
+
+                       if (dw_count >= DELAYED_WORK_LIMIT) {
+                               dw_do_work(object, &dw_array[0], dw_count);
+
+                               dwp = &dw_array[0];
+                               dw_count = 0;
                        }
-                       if(upl_ptr) {
+               }
+               entry++;
+               dst_offset += PAGE_SIZE_64;
+               xfer_size -= PAGE_SIZE;
+       }
+       if (dw_count)
+               dw_do_work(object, &dw_array[0], dw_count);
+
+       if (alias_page != NULL) {
+               VM_PAGE_FREE(alias_page);
+       }
+
+       if (page_list_count != NULL) {
+               if (upl->flags & UPL_INTERNAL)
+                       *page_list_count = 0;
+               else if (*page_list_count > entry)
+                       *page_list_count = entry;
+       }
+#if UPL_DEBUG
+       upl->upl_state = 1;
+#endif
+       vm_object_unlock(object);
+
+       return KERN_SUCCESS;
+}
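+
+/*
+ * A typical call, sketched (illustrative only; "object", "offset" and
+ * "npages" are hypothetical locals, and the caller is assumed to hold
+ * a reference on the object):
+ *
+ *	upl_t		upl;
+ *	upl_page_info_t	*pl;
+ *	unsigned int	count = MAX_UPL_SIZE;
+ *	kern_return_t	kr;
+ *
+ *	kr = vm_object_upl_request(object, offset, PAGE_SIZE * npages,
+ *				   &upl, NULL, &count,
+ *				   UPL_SET_INTERNAL | UPL_SET_LITE);
+ *	if (kr == KERN_SUCCESS) {
+ *		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+ *		// examine pl[i].phys_addr, then commit or abort the UPL
+ *	}
+ */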
+
+/* JMM - Backward compatibility for now */
+kern_return_t
+vm_fault_list_request(                 /* forward */
+       memory_object_control_t         control,
+       vm_object_offset_t      offset,
+       upl_size_t              size,
+       upl_t                   *upl_ptr,
+       upl_page_info_t         **user_page_list_ptr,
+       unsigned int            page_list_count,
+       int                     cntrl_flags);
+kern_return_t
+vm_fault_list_request(
+       memory_object_control_t         control,
+       vm_object_offset_t      offset,
+       upl_size_t              size,
+       upl_t                   *upl_ptr,
+       upl_page_info_t         **user_page_list_ptr,
+       unsigned int            page_list_count,
+       int                     cntrl_flags)
+{
+       unsigned int            local_list_count;
+       upl_page_info_t         *user_page_list;
+       kern_return_t           kr;
+
+       if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
+                return KERN_INVALID_ARGUMENT;
+
+       if (user_page_list_ptr != NULL) {
+               local_list_count = page_list_count;
+               user_page_list = *user_page_list_ptr;
+       } else {
+               local_list_count = 0;
+               user_page_list = NULL;
+       }
+       kr =  memory_object_upl_request(control,
+                               offset,
+                               size,
+                               upl_ptr,
+                               user_page_list,
+                               &local_list_count,
+                               cntrl_flags);
+
+       if(kr != KERN_SUCCESS)
+               return kr;
+
+       if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
+               *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
+       }
+
+       return KERN_SUCCESS;
+}
+
+               
+
+/*  
+ *     Routine:        vm_object_super_upl_request
+ *     Purpose:        
+ *             Cause the population of a portion of a vm_object
+ *             in much the same way as memory_object_upl_request.
+ *             Depending on the nature of the request, the pages
+ *             returned may contain valid data or be uninitialized.
+ *             However, the region may be expanded up to the super
+ *             cluster size provided.
+ */
+
+__private_extern__ kern_return_t
+vm_object_super_upl_request(
+       vm_object_t object,
+       vm_object_offset_t      offset,
+       upl_size_t              size,
+       upl_size_t              super_cluster,
+       upl_t                   *upl,
+       upl_page_info_t         *user_page_list,
+       unsigned int            *page_list_count,
+       int                     cntrl_flags)
+{
+       if (object->paging_offset > offset  || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
+               return KERN_FAILURE;
+
+       assert(object->paging_in_progress);
+       offset = offset - object->paging_offset;
+
+       if (super_cluster > size) {
+
+               vm_object_offset_t      base_offset;
+               upl_size_t              super_size;
+               vm_object_size_t        super_size_64;
+
+               base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
+               super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
+               super_size_64 = ((base_offset + super_size) > object->size) ? (object->size - base_offset) : super_size;
+               super_size = (upl_size_t) super_size_64;
+               assert(super_size == super_size_64);
+
+               if (offset > (base_offset + super_size)) {
+                       panic("vm_object_super_upl_request: Missed target pageout"
+                             " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
+                             offset, base_offset, super_size, super_cluster,
+                             size, object->paging_offset);
+               }
+               /*
+                * apparently there is a case where the vm requests a
+                * page to be written out whose offset is beyond the
+                * object size
+                */
+               if ((offset + size) > (base_offset + super_size)) {
+                       super_size_64 = (offset + size) - base_offset;
+                       super_size = (upl_size_t) super_size_64;
+                       assert(super_size == super_size_64);
+               }
+
+               offset = base_offset;
+               size = super_size;
+       }
+       return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
+}
+
+
+kern_return_t
+vm_map_create_upl(
+       vm_map_t                map,
+       vm_map_address_t        offset,
+       upl_size_t              *upl_size,
+       upl_t                   *upl,
+       upl_page_info_array_t   page_list,
+       unsigned int            *count,
+       int                     *flags)
+{
+       vm_map_entry_t  entry;
+       int             caller_flags;
+       int             force_data_sync;
+       int             sync_cow_data;
+       vm_object_t     local_object;
+       vm_map_offset_t local_offset;
+       vm_map_offset_t local_start;
+       kern_return_t   ret;
+
+       caller_flags = *flags;
+
+       if (caller_flags & ~UPL_VALID_FLAGS) {
+               /*
+                * For forward compatibility's sake,
+                * reject any unknown flag.
+                */
+               return KERN_INVALID_VALUE;
+       }
+       force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
+       sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
+
+       if (upl == NULL)
+               return KERN_INVALID_ARGUMENT;
+
+REDISCOVER_ENTRY:
+       vm_map_lock_read(map);
+
+       if (vm_map_lookup_entry(map, offset, &entry)) {
+
+               if ((entry->vme_end - offset) < *upl_size) {
+                       *upl_size = (upl_size_t) (entry->vme_end - offset);
+                       assert(*upl_size == entry->vme_end - offset);
+               }
+
+               if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
+                       *flags = 0;
+
+                       if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
+                               if (entry->object.vm_object->private)
+                                       *flags = UPL_DEV_MEMORY;
+
+                               if (entry->object.vm_object->phys_contiguous)
+                                       *flags |= UPL_PHYS_CONTIG;
+                       }
+                       vm_map_unlock_read(map);
+
+                       return KERN_SUCCESS;
+               }
+               if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
+                       if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
+                                       *upl_size = MAX_UPL_SIZE * PAGE_SIZE;
+               }
+               /*
+                *      Create an object if necessary.
+                */
+               if (entry->object.vm_object == VM_OBJECT_NULL) {
+
+                       if (vm_map_lock_read_to_write(map))
+                               goto REDISCOVER_ENTRY;
+
+                       entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
+                       entry->offset = 0;
+
+                       vm_map_lock_write_to_read(map);
+               }
+               if (!(caller_flags & UPL_COPYOUT_FROM)) {
+                       if (!(entry->protection & VM_PROT_WRITE)) {
+                               vm_map_unlock_read(map);
+                               return KERN_PROTECTION_FAILURE;
+                       }
+                       if (entry->needs_copy)  {
+                               /*
+                                * Honor copy-on-write for COPY_SYMMETRIC
+                                * strategy.
+                                */
+                               vm_map_t                local_map;
+                               vm_object_t             object;
+                               vm_object_offset_t      new_offset;
+                               vm_prot_t               prot;
+                               boolean_t               wired;
+                               vm_map_version_t        version;
+                               vm_map_t                real_map;
+
+                               local_map = map;
+
+                               if (vm_map_lookup_locked(&local_map,
+                                                        offset, VM_PROT_WRITE,
+                                                        OBJECT_LOCK_EXCLUSIVE,
+                                                        &version, &object,
+                                                        &new_offset, &prot, &wired,
+                                                        NULL,
+                                                        &real_map) != KERN_SUCCESS) {
+                                       vm_map_unlock_read(local_map);
+                                       return KERN_FAILURE;
+                               }
+                               if (real_map != map)
+                                       vm_map_unlock(real_map);
+                               vm_map_unlock_read(local_map);
+
+                               vm_object_unlock(object);
+
+                               goto REDISCOVER_ENTRY;
+                       }
+               }
+               if (entry->is_sub_map) {
+                       vm_map_t        submap;
+
+                       submap = entry->object.sub_map;
+                       local_start = entry->vme_start;
+                       local_offset = entry->offset;
+
+                       vm_map_reference(submap);
+                       vm_map_unlock_read(map);
+
+                       ret = vm_map_create_upl(submap, 
+                                               local_offset + (offset - local_start), 
+                                               upl_size, upl, page_list, count, flags);
+                       vm_map_deallocate(submap);
+
+                       return ret;
+               }
+               if (sync_cow_data) {
+                       if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
+                               local_object = entry->object.vm_object;
+                               local_start = entry->vme_start;
+                               local_offset = entry->offset;
+
+                               vm_object_reference(local_object);
+                               vm_map_unlock_read(map);
+
+                               if (local_object->shadow && local_object->copy) {
+                                       vm_object_lock_request(
+                                                              local_object->shadow,
+                                                              (vm_object_offset_t)
+                                                              ((offset - local_start) +
+                                                               local_offset) +
+                                                              local_object->shadow_offset,
+                                                              *upl_size, FALSE, 
+                                                              MEMORY_OBJECT_DATA_SYNC,
+                                                              VM_PROT_NO_CHANGE);
+                               }
+                               sync_cow_data = FALSE;
+                               vm_object_deallocate(local_object);
+
+                               goto REDISCOVER_ENTRY;
+                       }
+               }
+               if (force_data_sync) {
+                       local_object = entry->object.vm_object;
+                       local_start = entry->vme_start;
+                       local_offset = entry->offset;
+
+                       vm_object_reference(local_object);
+                       vm_map_unlock_read(map);
+
+                       vm_object_lock_request(
+                                              local_object,
+                                              (vm_object_offset_t)
+                                              ((offset - local_start) + local_offset),
+                                              (vm_object_size_t)*upl_size, FALSE, 
+                                              MEMORY_OBJECT_DATA_SYNC,
+                                              VM_PROT_NO_CHANGE);
+
+                       force_data_sync = FALSE;
+                       vm_object_deallocate(local_object);
+
+                       goto REDISCOVER_ENTRY;
+               }
+               if (entry->object.vm_object->private)
+                       *flags = UPL_DEV_MEMORY;
+               else
+                       *flags = 0;
+
+               if (entry->object.vm_object->phys_contiguous)
+                       *flags |= UPL_PHYS_CONTIG;
+
+               local_object = entry->object.vm_object;
+               local_offset = entry->offset;
+               local_start = entry->vme_start;
+
+               vm_object_reference(local_object);
+               vm_map_unlock_read(map);
+
+               ret = vm_object_iopl_request(local_object, 
+                                             (vm_object_offset_t) ((offset - local_start) + local_offset),
+                                             *upl_size,
+                                             upl,
+                                             page_list,
+                                             count,
+                                             caller_flags);
+               vm_object_deallocate(local_object);
+
+               return(ret);
+       } 
+       vm_map_unlock_read(map);
+
+       return(KERN_FAILURE);
+}
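+
+/*
+ * Hedged usage sketch (not a call site in this file; the flag choice
+ * and local names are illustrative):
+ *
+ *     upl_t           upl = NULL;
+ *     upl_size_t      upl_size = (upl_size_t) round_page(len);
+ *     unsigned int    count = 0;
+ *     int             flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL;
+ *     kern_return_t   kr;
+ *
+ *     kr = vm_map_create_upl(map, start, &upl_size, &upl,
+ *                            NULL, &count, &flags);
+ *
+ * On success, upl_size may have been clipped to the containing map
+ * entry, and *flags reports UPL_DEV_MEMORY / UPL_PHYS_CONTIG for the
+ * backing object.
+ */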
+
+/*
+ * Internal routine to enter a UPL into a VM map.
+ * 
+ * JMM - This should just be doable through the standard
+ * vm_map_enter() API.
+ */
+kern_return_t
+vm_map_enter_upl(
+       vm_map_t                map, 
+       upl_t                   upl, 
+       vm_map_offset_t         *dst_addr)
+{
+       vm_map_size_t           size;
+       vm_object_offset_t      offset;
+       vm_map_offset_t         addr;
+       vm_page_t               m;
+       kern_return_t           kr;
+       int                     isVectorUPL = 0, curr_upl=0;
+       upl_t                   vector_upl = NULL;
+       vm_offset_t             vector_upl_dst_addr = 0;
+       vm_map_t                vector_upl_submap = NULL;
+       upl_offset_t            subupl_offset = 0;
+       upl_size_t              subupl_size = 0;
+
+       if (upl == UPL_NULL)
+               return KERN_INVALID_ARGUMENT;
+
+       if((isVectorUPL = vector_upl_is_valid(upl))) {
+               int mapped=0,valid_upls=0;
+               vector_upl = upl;
+
+               upl_lock(vector_upl);
+               for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+                       upl =  vector_upl_subupl_byindex(vector_upl, curr_upl );
+                       if(upl == NULL)
+                               continue;
+                       valid_upls++;
+                       if (UPL_PAGE_LIST_MAPPED & upl->flags)
+                               mapped++;
+               }
+
+               if(mapped) { 
+                       if(mapped != valid_upls)
+                               panic("Only %d of the %d sub-upls within the Vector UPL are already mapped\n", mapped, valid_upls);
+                       else {
+                               upl_unlock(vector_upl);
+                               return KERN_FAILURE;
+                       }
+               }
+
+               kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
+               if( kr != KERN_SUCCESS )
+                       panic("Vector UPL submap allocation failed\n");
+               map = vector_upl_submap;
+               vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
+               curr_upl=0;
+       }
+       else
+               upl_lock(upl);
+
+process_upl_to_enter:
+       if(isVectorUPL){
+               if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+                       *dst_addr = vector_upl_dst_addr;
+                       upl_unlock(vector_upl);
+                       return KERN_SUCCESS;
+               }
+               upl =  vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+               if(upl == NULL)
+                       goto process_upl_to_enter;
+               vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
+               *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
+       } else {
+               /*
+                * check to see if already mapped
+                */
+               if (UPL_PAGE_LIST_MAPPED & upl->flags) {
+                       upl_unlock(upl);
+                       return KERN_FAILURE;
+               }
+       }
+       if ((!(upl->flags & UPL_SHADOWED)) &&
+           ((upl->flags & UPL_HAS_BUSY) ||
+            !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
+
+               vm_object_t             object;
+               vm_page_t               alias_page;
+               vm_object_offset_t      new_offset;
+               unsigned int            pg_num;
+               wpl_array_t             lite_list;
+
+               if (upl->flags & UPL_INTERNAL) {
+                       lite_list = (wpl_array_t) 
+                               ((((uintptr_t)upl) + sizeof(struct upl))
+                                + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+               } else {
+                       lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
+               }
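+               /*
+                * lite_list is a bitmap with one bit per page of the
+                * UPL: page N lives in word N >> 5, bit N & 31 (e.g.
+                * page 40 is bit 8 of lite_list[1]).
+                */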
+               object = upl->map_object;
+               upl->map_object = vm_object_allocate(upl->size);
+
+               vm_object_lock(upl->map_object);
+
+               upl->map_object->shadow = object;
+               upl->map_object->pageout = TRUE;
+               upl->map_object->can_persist = FALSE;
+               upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+               upl->map_object->shadow_offset = upl->offset - object->paging_offset;
+               upl->map_object->wimg_bits = object->wimg_bits;
+               offset = upl->map_object->shadow_offset;
+               new_offset = 0;
+               size = upl->size;
+
+               upl->flags |= UPL_SHADOWED;
+
+               while (size) {
+                       pg_num = (unsigned int) (new_offset / PAGE_SIZE);
+                       assert(pg_num == new_offset / PAGE_SIZE);
+
+                       if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+
+                               VM_PAGE_GRAB_FICTITIOUS(alias_page);
+
+                               vm_object_lock(object);
+
+                               m = vm_page_lookup(object, offset);
+                               if (m == VM_PAGE_NULL) {
+                                       panic("vm_upl_map: page missing\n");
+                               }
+
+                               /*
+                                * Convert the fictitious page to a private 
+                                * shadow of the real page.
+                                */
+                               assert(alias_page->fictitious);
+                               alias_page->fictitious = FALSE;
+                               alias_page->private = TRUE;
+                               alias_page->pageout = TRUE;
+                               /*
+                                * since m is a page in the upl it must
+                                * already be wired or BUSY, so it's
+                                * safe to assign the underlying physical
+                                * page to the alias
+                                */
+                               alias_page->phys_page = m->phys_page;
+
+                               vm_object_unlock(object);
+
+                               vm_page_lockspin_queues();
+                               vm_page_wire(alias_page);
+                               vm_page_unlock_queues();
+                               
+                               /*
+                                * ENCRYPTED SWAP:
+                                * The virtual page ("m") has to be wired in some way
+                                * here or its physical page ("m->phys_page") could
+                                * be recycled at any time.
+                                * Assuming this is enforced by the caller, we can't
+                                * get an encrypted page here.  Since the encryption
+                                * key depends on the VM page's "pager" object and
+                                * the "paging_offset", we couldn't handle 2 pageable
+                                * VM pages (with different pagers and paging_offsets)
+                                * sharing the same physical page:  we could end up
+                                * encrypting with one key (via one VM page) and
+                                * decrypting with another key (via the alias VM page).
+                                */
+                               ASSERT_PAGE_DECRYPTED(m);
+
+                               vm_page_insert(alias_page, upl->map_object, new_offset);
+
+                               assert(!alias_page->wanted);
+                               alias_page->busy = FALSE;
+                               alias_page->absent = FALSE;
+                       }
+                       size -= PAGE_SIZE;
+                       offset += PAGE_SIZE_64;
+                       new_offset += PAGE_SIZE_64;
+               }
+               vm_object_unlock(upl->map_object);
+       }
+       if (upl->flags & UPL_SHADOWED)
+               offset = 0;
+       else
+               offset = upl->offset - upl->map_object->paging_offset;
+       size = upl->size;
+       
+       vm_object_reference(upl->map_object);
+
+       if(!isVectorUPL) {
+               *dst_addr = 0;
+               /*
+               * NEED A UPL_MAP ALIAS
+               */
+               kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+                                 VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
+                                 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+
+               if (kr != KERN_SUCCESS) {
+                       upl_unlock(upl);
+                       return(kr);
+               }
+       }
+       else {
+               kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+                                 VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
+                                 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+               if(kr)
+                       panic("vm_map_enter failed for a Vector UPL\n");
+       }
+       vm_object_lock(upl->map_object);
+
+       for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
+               m = vm_page_lookup(upl->map_object, offset);
+
+               if (m) {
+                       unsigned int    cache_attr;
+                       cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+
+                       m->pmapped = TRUE;
+
+                       /* CODE SIGNING ENFORCEMENT: page has been wpmapped, 
+                        * but only in kernel space. If this was on a user map,
+                        * we'd have to set the wpmapped bit. */
+                       /* m->wpmapped = TRUE; */
+                       assert(map==kernel_map);
+       
+                       PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, cache_attr, TRUE);
+               }
+               offset += PAGE_SIZE_64;
+       }
+       vm_object_unlock(upl->map_object);
+
+       /*
+        * hold a reference for the mapping
+        */
+       upl->ref_count++;
+       upl->flags |= UPL_PAGE_LIST_MAPPED;
+       upl->kaddr = (vm_offset_t) *dst_addr;
+       assert(upl->kaddr == *dst_addr);
+       
+       if(isVectorUPL)
+               goto process_upl_to_enter;
+
+       upl_unlock(upl);
+
+       return KERN_SUCCESS;
+}
+
+/*
+ * Internal routine to remove a UPL mapping from a VM map.
+ *
+ * XXX - This should just be doable through a standard
+ * vm_map_remove() operation.  Otherwise, implicit clean-up
+ * of the target map won't be able to correctly remove
+ * these (and release the reference on the UPL).  Having
+ * to do this means we can't map these into user-space
+ * maps yet.
+ */
+kern_return_t
+vm_map_remove_upl(
+       vm_map_t        map, 
+       upl_t           upl)
+{
+       vm_address_t    addr;
+       upl_size_t      size;
+       int             isVectorUPL = 0, curr_upl = 0;
+       upl_t           vector_upl = NULL;
+
+       if (upl == UPL_NULL)
+               return KERN_INVALID_ARGUMENT;
+
+       if((isVectorUPL = vector_upl_is_valid(upl))) {
+               int     unmapped=0, valid_upls=0;
+               vector_upl = upl;
+               upl_lock(vector_upl);
+               for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+                       upl =  vector_upl_subupl_byindex(vector_upl, curr_upl );
+                       if(upl == NULL)
+                               continue;
+                       valid_upls++;
+                       if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
+                               unmapped++;
+               }
+
+               if(unmapped) {
+                       if(unmapped != valid_upls)
+                               panic("%d of the %d sub-upls within the Vector UPL are not mapped\n", unmapped, valid_upls);
+                       else {
+                               upl_unlock(vector_upl);
+                               return KERN_FAILURE;
+                       }
+               }
+               curr_upl=0;
+       }
+       else
+               upl_lock(upl);
+
+process_upl_to_remove:
+       if(isVectorUPL) {
+               if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+                       vm_map_t v_upl_submap;
+                       vm_offset_t v_upl_submap_dst_addr;
+                       vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
+
+                       vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
+                       vm_map_deallocate(v_upl_submap);
+                       upl_unlock(vector_upl);
+                       return KERN_SUCCESS;
+               }
+
+               upl =  vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+               if(upl == NULL)
+                       goto process_upl_to_remove;     
+       }
+
+       if (upl->flags & UPL_PAGE_LIST_MAPPED) {
+               addr = upl->kaddr;
+               size = upl->size;
+
+               assert(upl->ref_count > 1);
+               upl->ref_count--;               /* removing mapping ref */
+
+               upl->flags &= ~UPL_PAGE_LIST_MAPPED;
+               upl->kaddr = (vm_offset_t) 0;
+               
+               if(!isVectorUPL) {
+                       upl_unlock(upl);
+               
+                       vm_map_remove(map,
+                               vm_map_trunc_page(addr),
+                               vm_map_round_page(addr + size),
+                               VM_MAP_NO_FLAGS);
+               
+                       return KERN_SUCCESS;
+               }
+               else {
+                       /*
+                       * If it's a Vectored UPL, we'll be removing the entire
+                       * submap anyways, so no need to remove individual UPL
+                       * element mappings from within the submap
+                       */      
+                       goto process_upl_to_remove;
+               }
+       }
+       upl_unlock(upl);
+
+       return KERN_FAILURE;
+}
+
+static void
+dw_do_work(
+       vm_object_t     object,
+       struct dw       *dwp,
+       int             dw_count)
+{
+       int             j;
+       boolean_t       held_as_spin = TRUE;
+
+       /*
+        * pageout_scan takes the vm_page_lock_queues first
+        * then tries for the object lock... to avoid what
+        * is effectively a lock inversion, we'll go to the
+        * trouble of taking them in that same order... otherwise
+        * if this object contains the majority of the pages resident
+        * in the UBC (or a small set of large objects actively being
+        * worked on contain the majority of the pages), we could
+        * cause the pageout_scan thread to 'starve' in its attempt
+        * to find pages to move to the free queue, since it has to
+        * successfully acquire the object lock of any candidate page
+        * before it can steal/clean it.
+        */
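+       /*
+        * In short, the canonical order is: page-queues lock first,
+        * then the object lock.  Since we arrive here holding only
+        * the object lock, we either trylock the queues or drop the
+        * object lock and re-take both in that order below.
+        */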
+       if (!vm_page_trylockspin_queues()) {
+               vm_object_unlock(object);
+
+               vm_page_lockspin_queues();
+
+               for (j = 0; ; j++) {
+                       if (!vm_object_lock_avoid(object) &&
+                           _vm_object_lock_try(object))
+                               break;
+                       vm_page_unlock_queues();
+                       mutex_pause(j);
+                       vm_page_lockspin_queues();
+               }
+       }
+       for (j = 0; j < dw_count; j++, dwp++) {
+
+               if (dwp->dw_mask & DW_vm_pageout_throttle_up)
+                       vm_pageout_throttle_up(dwp->dw_m);
+
+               if (dwp->dw_mask & DW_vm_page_wire)
+                       vm_page_wire(dwp->dw_m);
+               else if (dwp->dw_mask & DW_vm_page_unwire) {
+                       boolean_t       queueit;
+
+                       queueit = (dwp->dw_mask & DW_vm_page_free) ? FALSE : TRUE;
+
+                       vm_page_unwire(dwp->dw_m, queueit);
+               }
+               if (dwp->dw_mask & DW_vm_page_free) {
+                       if (held_as_spin == TRUE) {
+                               vm_page_lockconvert_queues();
+                               held_as_spin = FALSE;
+                       }
+                       vm_page_free(dwp->dw_m);
+               } else {
+                       if (dwp->dw_mask & DW_vm_page_deactivate_internal)
+                               vm_page_deactivate_internal(dwp->dw_m, FALSE);
+                       else if (dwp->dw_mask & DW_vm_page_activate)
+                               vm_page_activate(dwp->dw_m);
+                       else if (dwp->dw_mask & DW_vm_page_speculate)
+                               vm_page_speculate(dwp->dw_m, TRUE);
+                       else if (dwp->dw_mask & DW_vm_page_lru)
+                               vm_page_lru(dwp->dw_m);
+                       
+                       if (dwp->dw_mask & DW_set_reference)
+                               dwp->dw_m->reference = TRUE;
+                       else if (dwp->dw_mask & DW_clear_reference)
+                               dwp->dw_m->reference = FALSE;
+
+                       if (dwp->dw_mask & DW_clear_busy)
+                               dwp->dw_m->busy = FALSE;
+
+                       if (dwp->dw_mask & DW_PAGE_WAKEUP)
+                               PAGE_WAKEUP(dwp->dw_m);
+               }
+       }
+       vm_page_unlock_queues();
+}
+
+
+
+kern_return_t
+upl_commit_range(
+       upl_t                   upl, 
+       upl_offset_t            offset, 
+       upl_size_t              size,
+       int                     flags,
+       upl_page_info_t         *page_list,
+       mach_msg_type_number_t  count,
+       boolean_t               *empty) 
+{
+       upl_size_t              xfer_size, subupl_size = size;
+       vm_object_t             shadow_object;
+       vm_object_t             object;
+       vm_object_offset_t      target_offset;
+       upl_offset_t            subupl_offset = offset;
+       int                     entry;
+       wpl_array_t             lite_list;
+       int                     occupied;
+       int                     clear_refmod = 0;
+       int                     pgpgout_count = 0;
+       struct  dw              dw_array[DELAYED_WORK_LIMIT];
+       struct  dw              *dwp;
+       int                     dw_count, isVectorUPL = 0;
+       upl_t                   vector_upl = NULL;
+
+       *empty = FALSE;
+
+       if (upl == UPL_NULL)
+               return KERN_INVALID_ARGUMENT;
+
+       if (count == 0)
+               page_list = NULL;
+
+       if((isVectorUPL = vector_upl_is_valid(upl))) {
+               vector_upl = upl;
+               upl_lock(vector_upl);
+       }
+       else
+               upl_lock(upl);
+
+process_upl_to_commit:
+
+       if(isVectorUPL) {
+               size = subupl_size;
+               offset = subupl_offset;
+               if(size == 0) {
+                       upl_unlock(vector_upl);
+                       return KERN_SUCCESS;
+               }
+               upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+               if(upl == NULL) {
+                       upl_unlock(vector_upl);
+                       return KERN_FAILURE;
+               }
+               page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
+               subupl_size -= size;
+               subupl_offset += size;
+       }
+
+#if UPL_DEBUG
+       if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+               (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+               
+               upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+               upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
+
+               upl->upl_commit_index++;
+       }
+#endif
+       if (upl->flags & UPL_DEVICE_MEMORY)
+               xfer_size = 0;
+       else if ((offset + size) <= upl->size)
+               xfer_size = size;
+       else {
+               if(!isVectorUPL)
+                       upl_unlock(upl);
+               else {
+                       upl_unlock(vector_upl);
+               }
+               return KERN_FAILURE;
+       }
+       if (upl->flags & UPL_CLEAR_DIRTY)
+               flags |= UPL_COMMIT_CLEAR_DIRTY;
+
+       if (upl->flags & UPL_INTERNAL)
+               lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
+                                          + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+       else
+               lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
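+
+       /*
+        * Layout sketch of an INTERNAL UPL, per the arithmetic above:
+        *
+        *   [struct upl][upl_page_info_t x (size/PAGE_SIZE)][lite bitmap]
+        *
+        * external UPLs omit the embedded page-info array, so their
+        * lite bitmap starts immediately after the header.
+        */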
+
+       object = upl->map_object;
+
+       if (upl->flags & UPL_SHADOWED) {
+               vm_object_lock(object);
+               shadow_object = object->shadow;
+       } else {
+               shadow_object = object;
+       }
+       entry = offset/PAGE_SIZE;
+       target_offset = (vm_object_offset_t)offset;
+
+       if (upl->flags & UPL_KERNEL_OBJECT)
+               vm_object_lock_shared(shadow_object);
+       else
+               vm_object_lock(shadow_object);
+
+       if (upl->flags & UPL_ACCESS_BLOCKED) {
+               assert(shadow_object->blocked_access);
+               shadow_object->blocked_access = FALSE;
+               vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
+       }
+
+       if (shadow_object->code_signed) {
+               /*
+                * CODE SIGNING:
+                * If the object is code-signed, do not let this UPL tell
+                * us if the pages are valid or not.  Let the pages be
+                * validated by VM the normal way (when they get mapped or
+                * copied).
+                */
+               flags &= ~UPL_COMMIT_CS_VALIDATED;
+       }
+       if (! page_list) {
+               /*
+                * No page list to get the code-signing info from !?
+                */
+               flags &= ~UPL_COMMIT_CS_VALIDATED;
+       }
+
+       dwp = &dw_array[0];
+       dw_count = 0;
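+
+       /*
+        * Per-page queue manipulations are accumulated as
+        * (dw_m, dw_mask) pairs and applied in batches of up to
+        * DELAYED_WORK_LIMIT by dw_do_work(), so the page-queues
+        * lock is taken once per batch rather than once per page.
+        */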
+
+       while (xfer_size) {
+               vm_page_t       t, m;
+
+               dwp->dw_mask = 0;
+               clear_refmod = 0;
+
+               m = VM_PAGE_NULL;
+
+               if (upl->flags & UPL_LITE) {
+                       unsigned int    pg_num;
+
+                       pg_num = (unsigned int) (target_offset/PAGE_SIZE);
+                       assert(pg_num == target_offset/PAGE_SIZE);
+
+                       if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+                               lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
+
+                               if (!(upl->flags & UPL_KERNEL_OBJECT))
+                                       m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
+                       }
+               }
+               if (upl->flags & UPL_SHADOWED) {
+                       if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+
+                               t->pageout = FALSE;
+
+                               VM_PAGE_FREE(t);
+
+                               if (m == VM_PAGE_NULL)
+                                       m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
+                       }
+               }
+               if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
+                       goto commit_next_page;
+
+               if (flags & UPL_COMMIT_CS_VALIDATED) {
+                       /*
+                        * CODE SIGNING:
+                        * Set the code signing bits according to
+                        * what the UPL says they should be.
+                        */
+                       m->cs_validated = page_list[entry].cs_validated;
+                       m->cs_tainted = page_list[entry].cs_tainted;
+               }
+               if (upl->flags & UPL_IO_WIRE) {
+
+                       if (page_list)
+                               page_list[entry].phys_addr = 0;
+
+                       if (flags & UPL_COMMIT_SET_DIRTY)
+                               m->dirty = TRUE;
+                       else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
+                               m->dirty = FALSE;
+
+                               if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+                                   m->cs_validated && !m->cs_tainted) {
+                                       /*
+                                        * CODE SIGNING:
+                                        * This page is no longer dirty
+                                        * but could have been modified,
+                                        * so it will need to be
+                                        * re-validated.
+                                        */
+                                       m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+                                       vm_cs_validated_resets++;
+#endif
+                                       pmap_disconnect(m->phys_page);
+                               }
+                               clear_refmod |= VM_MEM_MODIFIED;
+                       }
+                       if (flags & UPL_COMMIT_INACTIVATE) {
+                               dwp->dw_mask |= DW_vm_page_deactivate_internal;
+                               clear_refmod |= VM_MEM_REFERENCED;
+                       }
+                       if (upl->flags & UPL_ACCESS_BLOCKED) {
+                               /*
+                                * We blocked access to the pages in this UPL.
+                                * Clear the "busy" bit and wake up any waiter
+                                * for this page.
+                                */
+                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                       }
+                       if (m->absent) {
+                               if (flags & UPL_COMMIT_FREE_ABSENT)
+                                       dwp->dw_mask |= DW_vm_page_free;
+                               else {
+                                       m->absent = FALSE;
+                                       dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                               }
+                       } else
+                               dwp->dw_mask |= DW_vm_page_unwire;
+
+                       goto commit_next_page;
+               }
+               /*
+                * make sure to clear the hardware
+                * modify or reference bits before
+                * releasing the BUSY bit on this page;
+                * otherwise we risk losing a legitimate
+                * change of state
+                */
+               if (flags & UPL_COMMIT_CLEAR_DIRTY) {
+                       m->dirty = FALSE;
+
+                       if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+                           m->cs_validated && !m->cs_tainted) {
+                               /*
+                                * CODE SIGNING:
+                                * This page is no longer dirty
+                                * but could have been modified,
+                                * so it will need to be
+                                * re-validated.
+                                */
+                               m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+                               vm_cs_validated_resets++;
+#endif
+                               pmap_disconnect(m->phys_page);
+                       }
+                       clear_refmod |= VM_MEM_MODIFIED;
+               }
+               if (page_list) {
+                       upl_page_info_t *p;
+
+                       p = &(page_list[entry]);
+
+                       if (p->phys_addr && p->pageout && !m->pageout) {
+                               m->busy = TRUE;
+                               m->pageout = TRUE;
+
+                               dwp->dw_mask |= DW_vm_page_wire;
+
+                       } else if (p->phys_addr &&
+                                  !p->pageout && m->pageout &&
+                                  !m->dump_cleaning) {
+                               m->pageout = FALSE;
+                               m->absent = FALSE;
+                               m->overwriting = FALSE;
+
+                               dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
+                       }
+                       page_list[entry].phys_addr = 0;
+               }
+               m->dump_cleaning = FALSE;
+
+               if (m->laundry)
+                       dwp->dw_mask |= DW_vm_pageout_throttle_up;
+
+               if (m->pageout) {
+                       m->cleaning = FALSE;
+                       m->encrypted_cleaning = FALSE;
+                       m->pageout = FALSE;
+#if MACH_CLUSTER_STATS
+                       if (m->wanted) vm_pageout_target_collisions++;
+#endif
+                       m->dirty = FALSE;
+
+                       if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+                           m->cs_validated && !m->cs_tainted) {
+                               /*
+                                * CODE SIGNING:
+                                * This page is no longer dirty
+                                * but could have been modified,
+                                * so it will need to be
+                                * re-validated.
+                                */
+                               m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+                               vm_cs_validated_resets++;
+#endif
+                               pmap_disconnect(m->phys_page);
+                       }
+
+                       if ((flags & UPL_COMMIT_SET_DIRTY) ||
+                           (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)))
+                               m->dirty = TRUE;
+
+                       if (m->dirty) {
+                               /*
+                                * page was re-dirtied after we started
+                                * the pageout... reactivate it since 
+                                * we don't know whether the on-disk
+                                * copy matches what is now in memory
+                                */
+                               dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
+
+                               if (upl->flags & UPL_PAGEOUT) {
+                                       CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
+                                       VM_STAT_INCR(reactivations);
+                                       DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
+                               }
+                       } else {
+                               /*
+                                * page has been successfully cleaned
+                                * go ahead and free it for other use
+                                */
+
+                               if (m->object->internal) {
+                                       DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
+                               } else {
+                                       DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
+                               }
+                               dwp->dw_mask |= DW_vm_page_free;
+                               if (upl->flags & UPL_PAGEOUT) {
+                                       CLUSTER_STAT(vm_pageout_target_page_freed++;)
+
+                                       if (page_list[entry].dirty) {
+                                               VM_STAT_INCR(pageouts);
+                                               DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
+                                               pgpgout_count++;
+                                       }
+                               }
+                       }
+                       goto commit_next_page;
+               }
+#if MACH_CLUSTER_STATS
+               if (m->wpmapped)
+                       m->dirty = pmap_is_modified(m->phys_page);
+
+               if (m->dirty)   vm_pageout_cluster_dirtied++;
+               else            vm_pageout_cluster_cleaned++;
+               if (m->wanted)  vm_pageout_cluster_collisions++;
+#endif
+               m->dirty = FALSE;
+
+               if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+                   m->cs_validated && !m->cs_tainted) {
+                       /*
+                        * CODE SIGNING:
+                        * This page is no longer dirty
+                        * but could have been modified,
+                        * so it will need to be
+                        * re-validated.
+                        */
+                       m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+                       vm_cs_validated_resets++;
+#endif
+                       pmap_disconnect(m->phys_page);
+               }
+
+               if ((m->busy) && (m->cleaning)) {
+                       /*
+                        * the request_page_list case
+                        */
+                       m->absent = FALSE;
+                       m->overwriting = FALSE;
+
+                       dwp->dw_mask |= DW_clear_busy;
+
+               } else if (m->overwriting) {
+                       /*
+                        * alternate request page list, write to 
+                        * page_list case.  Occurs when the original
+                        * page was wired at the time of the list
+                        * request
+                        */
+                       assert(VM_PAGE_WIRED(m));
+                       m->overwriting = FALSE;
+
+                       dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
+               }
+               m->cleaning = FALSE;
+               m->encrypted_cleaning = FALSE;
+
+               /*
+                * It is part of the semantics of COPYOUT_FROM
+                * UPLs that a commit implies a cache sync
+                * between the vm page and the backing store;
+                * this can be used to strip the precious bit
+                * as well as to clean
+                */
+               if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
+                       m->precious = FALSE;
+
+               if (flags & UPL_COMMIT_SET_DIRTY)
+                       m->dirty = TRUE;
+
+               if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
+                       dwp->dw_mask |= DW_vm_page_deactivate_internal;
+                       clear_refmod |= VM_MEM_REFERENCED;
+
+               } else if (!m->active && !m->inactive && !m->speculative) {
+
+                       if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
+                               dwp->dw_mask |= DW_vm_page_speculate;
+                       else if (m->reference)
+                               dwp->dw_mask |= DW_vm_page_activate;
+                       else {
+                               dwp->dw_mask |= DW_vm_page_deactivate_internal;
+                               clear_refmod |= VM_MEM_REFERENCED;
+                       }
+               }
+               if (upl->flags & UPL_ACCESS_BLOCKED) {
+                       /*
+                        * We blocked access to the pages in this UPL.
+                        * Clear the "busy" bit on this page before we
+                        * wake up any waiter.
+                        */
+                       dwp->dw_mask |= DW_clear_busy;
+               }
+               /*
+                * Wake up any thread waiting for the page to finish cleaning.
+                */
+               dwp->dw_mask |= DW_PAGE_WAKEUP;
+
+commit_next_page:
+               if (clear_refmod)
+                       pmap_clear_refmod(m->phys_page, clear_refmod);
+
+               target_offset += PAGE_SIZE_64;
+               xfer_size -= PAGE_SIZE;
+               entry++;
+
+               if (dwp->dw_mask) {
+                       if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+                               if (m->busy == FALSE) {
+                                       /*
+                                        * dw_do_work may need to drop the object lock
+                                        * if it does, we need the pages it's looking at to
+                                        * be held stable via the busy bit.
+                                        */
+                                       m->busy = TRUE;
+                                       dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                               }
+                               dwp->dw_m = m;
+                               dwp++;
+                               dw_count++;
+
+                               if (dw_count >= DELAYED_WORK_LIMIT) {
+                                       dw_do_work(shadow_object, &dw_array[0], dw_count);
+                       
+                                       dwp = &dw_array[0];
+                                       dw_count = 0;
+                               }
+                       } else {
+                               if (dwp->dw_mask & DW_clear_busy)
+                                       m->busy = FALSE;
+
+                               if (dwp->dw_mask & DW_PAGE_WAKEUP)
+                                       PAGE_WAKEUP(m);
+                       }
+               }
+       }
+       if (dw_count)
+               dw_do_work(shadow_object, &dw_array[0], dw_count);
+
+       occupied = 1;
+
+       if (upl->flags & UPL_DEVICE_MEMORY)  {
+               occupied = 0;
+       } else if (upl->flags & UPL_LITE) {
+               int     pg_num;
+               int     i;
+
+               pg_num = upl->size/PAGE_SIZE;
+               pg_num = (pg_num + 31) >> 5;
+               occupied = 0;
+
+               for (i = 0; i < pg_num; i++) {
+                       if (lite_list[i] != 0) {
+                               occupied = 1;
+                               break;
+                       }
+               }
+       } else {
+               if (queue_empty(&upl->map_object->memq))
+                       occupied = 0;
+       }
+       if (occupied == 0) {
+               /*
+                * If this UPL element belongs to a Vector UPL and is
+                * empty, then this is the right function to deallocate
+                * it. So go ahead and set the *empty variable. The flag
+                * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of
+                * view, should be considered relevant for the Vector UPL
+                * and not the internal UPLs.
+                */
+               if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
+                       *empty = TRUE;
+
+               if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+                       /*
+                        * this is not a paging object
+                        * so we need to drop the paging reference
+                        * that was taken when we created the UPL
+                        * against this object
+                        */
+                       vm_object_activity_end(shadow_object);
+               } else {
+                        /*
+                         * we donated the paging reference to
+                         * the map object... vm_pageout_object_terminate
+                         * will drop this reference
+                         */
+               }
+       }
+       vm_object_unlock(shadow_object);
+       if (object != shadow_object)
+               vm_object_unlock(object);
+       
+       if(!isVectorUPL)
+               upl_unlock(upl);
+       else {
+               /* 
+                * If we completed our operations on a UPL that is
+                * part of a Vectored UPL and if empty is TRUE, then
+                * we should go ahead and deallocate this UPL element. 
+                * Then we check if this was the last of the UPL elements
+                * within that Vectored UPL. If so, set empty to TRUE
+                * so that in ubc_upl_commit_range or ubc_upl_commit, we
+                * can go ahead and deallocate the Vector UPL too.
+                */
+               if(*empty==TRUE) {
+                       *empty = vector_upl_set_subupl(vector_upl, upl, 0);
+                       upl_deallocate(upl);
+               }
+               goto process_upl_to_commit;
+       }
+
+       if (pgpgout_count) {
+               DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
+       }
+
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+upl_abort_range(
+       upl_t                   upl, 
+       upl_offset_t            offset, 
+       upl_size_t              size,
+       int                     error,
+       boolean_t               *empty) 
+{
+       upl_size_t              xfer_size, subupl_size = size;
+       vm_object_t             shadow_object;
+       vm_object_t             object;
+       vm_object_offset_t      target_offset;
+       upl_offset_t            subupl_offset = offset;
+       int                     entry;
+       wpl_array_t             lite_list;
+       int                     occupied;
+       struct  dw              dw_array[DELAYED_WORK_LIMIT];
+       struct  dw              *dwp;
+       int                     dw_count, isVectorUPL = 0;
+       upl_t                   vector_upl = NULL;
+
+       *empty = FALSE;
+
+       if (upl == UPL_NULL)
+               return KERN_INVALID_ARGUMENT;
+
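+       /*
+        * Aborting an IO_WIRE'd UPL without dumping its pages is
+        * handled as a commit that frees any absent pages.
+        */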
+       if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
+               return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
+
+       if((isVectorUPL = vector_upl_is_valid(upl))) {
+               vector_upl = upl;
+               upl_lock(vector_upl);
+       }
+       else
+               upl_lock(upl);
+
+process_upl_to_abort:
+       if(isVectorUPL) {
+               size = subupl_size;
+               offset = subupl_offset;
+               if(size == 0) {
+                       upl_unlock(vector_upl);
+                       return KERN_SUCCESS;
+               }
+               upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+               if(upl == NULL) {
+                       upl_unlock(vector_upl);
+                       return KERN_FAILURE;
+               }
+               subupl_size -= size;
+               subupl_offset += size;
+       }
+
+       *empty = FALSE;
+
+#if UPL_DEBUG
+       if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+               (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+               
+               upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+               upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
+               upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
+
+               upl->upl_commit_index++;
+       }
+#endif
+       if (upl->flags & UPL_DEVICE_MEMORY)
+               xfer_size = 0;
+       else if ((offset + size) <= upl->size)
+               xfer_size = size;
+       else {
+               if(!isVectorUPL)
+                       upl_unlock(upl);
+               else {
+                       upl_unlock(vector_upl);
+               }
+
+               return KERN_FAILURE;
+       }
+       if (upl->flags & UPL_INTERNAL) {
+               lite_list = (wpl_array_t) 
+                       ((((uintptr_t)upl) + sizeof(struct upl))
+                       + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+       } else {
+               lite_list = (wpl_array_t) 
+                       (((uintptr_t)upl) + sizeof(struct upl));
+       }
+       object = upl->map_object;
+
+       if (upl->flags & UPL_SHADOWED) {
+               vm_object_lock(object);
+               shadow_object = object->shadow;
+       } else
+               shadow_object = object;
+
+       entry = offset/PAGE_SIZE;
+       target_offset = (vm_object_offset_t)offset;
+
+       if (upl->flags & UPL_KERNEL_OBJECT)
+               vm_object_lock_shared(shadow_object);
+       else
+               vm_object_lock(shadow_object);
+
+       if (upl->flags & UPL_ACCESS_BLOCKED) {
+               assert(shadow_object->blocked_access);
+               shadow_object->blocked_access = FALSE;
+               vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
+       }
+
+       dwp = &dw_array[0];
+       dw_count = 0;
+
+       if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
+               panic("upl_abort_range: kernel_object being DUMPED");
+
+       while (xfer_size) {
+               vm_page_t       t, m;
+
+               dwp->dw_mask = 0;
+
+               m = VM_PAGE_NULL;
+
+               if (upl->flags & UPL_LITE) {
+                       unsigned int    pg_num;
+
+                       pg_num = (unsigned int) (target_offset/PAGE_SIZE);
+                       assert(pg_num == target_offset/PAGE_SIZE);
+                       
+
+                       if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+                               lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
+
+                               if ( !(upl->flags & UPL_KERNEL_OBJECT))
+                                       m = vm_page_lookup(shadow_object, target_offset +
+                                                          (upl->offset - shadow_object->paging_offset));
+                       }
+               }
+               if (upl->flags & UPL_SHADOWED) {
+                       if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+                               t->pageout = FALSE;
+
+                               VM_PAGE_FREE(t);
+
+                               if (m == VM_PAGE_NULL)
+                                       m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
+                       }
+               }
+               if ((upl->flags & UPL_KERNEL_OBJECT))
+                       goto abort_next_page;
+
+               if (m != VM_PAGE_NULL) {
+
+                       if (m->absent) {
+                               boolean_t must_free = TRUE;
+
+                               m->clustered = FALSE;
+                               /*
+                                * COPYOUT = FALSE case
+                                * check for error conditions which must
+                                * be passed back to the page's customer
+                                */
+                               if (error & UPL_ABORT_RESTART) {
+                                       m->restart = TRUE;
+                                       m->absent = FALSE;
+                                       m->unusual = TRUE;
+                                       must_free = FALSE;
+                               } else if (error & UPL_ABORT_UNAVAILABLE) {
+                                       m->restart = FALSE;
+                                       m->unusual = TRUE;
+                                       must_free = FALSE;
+                               } else if (error & UPL_ABORT_ERROR) {
+                                       m->restart = FALSE;
+                                       m->absent = FALSE;
+                                       m->error = TRUE;
+                                       m->unusual = TRUE;
+                                       must_free = FALSE;
+                               }
+
+                               /*
+                                * ENCRYPTED SWAP:
+                                * If the page was already encrypted,
+                                * we don't really need to decrypt it
+                                * now.  It will get decrypted later,
+                                * on demand, as soon as someone needs
+                                * to access its contents.
+                                */
+
+                               m->cleaning = FALSE;
+                               m->encrypted_cleaning = FALSE;
+                               m->overwriting = FALSE;
+
+                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+
+                               if (must_free == TRUE)
+                                       dwp->dw_mask |= DW_vm_page_free;
+                               else
+                                       dwp->dw_mask |= DW_vm_page_activate;
+                       } else {
+                               /*                          
+                                * Handle the trusted pager throttle.
+                                */                     
+                               if (m->laundry)
+                                       dwp->dw_mask |= DW_vm_pageout_throttle_up;
+
+                               if (m->pageout) {
+                                       assert(m->busy);
+                                       assert(m->wire_count == 1);
+                                       m->pageout = FALSE;
+
+                                       dwp->dw_mask |= DW_vm_page_unwire;
+                               }
+                               m->dump_cleaning = FALSE;
+                               m->cleaning = FALSE;
+                               m->encrypted_cleaning = FALSE;
+                               m->overwriting = FALSE;
+#if    MACH_PAGEMAP
+                               vm_external_state_clr(m->object->existence_map, m->offset);
+#endif /* MACH_PAGEMAP */
+                               if (error & UPL_ABORT_DUMP_PAGES) {
+                                       pmap_disconnect(m->phys_page);
+
+                                       dwp->dw_mask |= DW_vm_page_free;
+                               } else {
+                                       if (error & UPL_ABORT_REFERENCE) {
+                                               /*
+                                                * we've been told to explicitly
+                                                * reference this page... for
+                                                * file I/O, this is done by
+                                                * implementing an LRU on the inactive queue
+                                                */
+                                               dwp->dw_mask |= DW_vm_page_lru;
+                                       }
+                                       dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                               }
+                       }
+               }
+abort_next_page:
+               target_offset += PAGE_SIZE_64;
+               xfer_size -= PAGE_SIZE;
+               entry++;
+
+               if (dwp->dw_mask) {
+                       if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+                               if (m->busy == FALSE) {
+                                       /*
+                                        * dw_do_work may need to drop the object lock
+                                        * if it does, we need the pages it's looking at to
+                                        * be held stable via the busy bit.
+                                        */
+                                       m->busy = TRUE;
+                                       dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                               }
+                               dwp->dw_m = m;
+                               dwp++;
+                               dw_count++;
+
+                               if (dw_count >= DELAYED_WORK_LIMIT) {
+                                       dw_do_work(shadow_object, &dw_array[0], dw_count);
+                               
+                                       dwp = &dw_array[0];
+                                       dw_count = 0;
+                               }
+                       } else {
+                               if (dwp->dw_mask & DW_clear_busy)
+                                       m->busy = FALSE;
+
+                               if (dwp->dw_mask & DW_PAGE_WAKEUP)
+                                       PAGE_WAKEUP(m);
+                       }
+               }
+       }
+       if (dw_count)
+               dw_do_work(shadow_object, &dw_array[0], dw_count);
+
+       occupied = 1;
+
+       if (upl->flags & UPL_DEVICE_MEMORY)  {
+               occupied = 0;
+       } else if (upl->flags & UPL_LITE) {
+               int     pg_num;
+               int     i;
+
+               pg_num = upl->size/PAGE_SIZE;
+               pg_num = (pg_num + 31) >> 5;
+               occupied = 0;
+
+               for (i = 0; i < pg_num; i++) {
+                       if (lite_list[i] != 0) {
+                               occupied = 1;
+                               break;
+                       }
+               }
+       } else {
+               if (queue_empty(&upl->map_object->memq))
+                       occupied = 0;
+       }
+       if (occupied == 0) {
+               /*
+                * If this UPL element belongs to a Vector UPL and is
+                * empty, then this is the right function to deallocate
+                * it. So go ahead and set the *empty variable. From the
+                * caller's point of view, the flag UPL_COMMIT_NOTIFY_EMPTY
+                * should be considered relevant for the Vector UPL and
+                * not the internal UPLs.
+                */
+               if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
+                       *empty = TRUE;
+
+               if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+                       /*
+                        * this is not a paging object
+                        * so we need to drop the paging reference
+                        * that was taken when we created the UPL
+                        * against this object
+                        */
+                       vm_object_activity_end(shadow_object);
+               } else {
+                        /*
+                         * we donated the paging reference to
+                         * the map object... vm_pageout_object_terminate
+                         * will drop this reference
+                         */
+               }
+       }
+       vm_object_unlock(shadow_object);
+       if (object != shadow_object)
+               vm_object_unlock(object);
+       
+       if(!isVectorUPL)
+               upl_unlock(upl);
+       else {
+               /* 
+               * If we completed our operations on a UPL that is
+               * part of a Vectored UPL and if empty is TRUE, then
+               * we should go ahead and deallocate this UPL element. 
+               * Then we check if this was the last of the UPL elements
+               * within that Vectored UPL. If so, set empty to TRUE
+               * so that in ubc_upl_abort_range or ubc_upl_abort, we
+               * can go ahead and deallocate the Vector UPL too.
+               */
+               if(*empty == TRUE) {
+                       *empty = vector_upl_set_subupl(vector_upl, upl,0);
+                       upl_deallocate(upl);
+               }
+               goto process_upl_to_abort;
+       }
+
+       return KERN_SUCCESS;
+}
+
+
+kern_return_t
+upl_abort(
+       upl_t   upl,
+       int     error)
+{
+       boolean_t       empty;
+
+       return upl_abort_range(upl, 0, upl->size, error, &empty);
+}
+
+
+/* an option on commit should be wire */
+kern_return_t
+upl_commit(
+       upl_t                   upl,
+       upl_page_info_t         *page_list,
+       mach_msg_type_number_t  count)
+{
+       boolean_t       empty;
+
+       return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
+}
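+
+/*
+ * Illustrative sketch (not part of this change; "io_failed", "io_offset",
+ * "io_size", "pl" and "page_list_count" are assumed caller-side names):
+ * a typical caller commits or aborts the UPL once its I/O completes and
+ * watches the "empty" flag to learn when all of the UPL's pages have
+ * been processed.
+ *
+ *	boolean_t	empty;
+ *
+ *	if (io_failed)
+ *		upl_abort_range(upl, io_offset, io_size,
+ *				UPL_ABORT_ERROR, &empty);
+ *	else
+ *		upl_commit_range(upl, io_offset, io_size, 0,
+ *				 pl, page_list_count, &empty);
+ *	if (empty)
+ *		upl_deallocate(upl);
+ */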
+
+
+unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
+
+kern_return_t
+vm_object_iopl_request(
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       upl_size_t              size,
+       upl_t                   *upl_ptr,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            *page_list_count,
+       int                     cntrl_flags)
+{
+       vm_page_t               dst_page;
+       vm_object_offset_t      dst_offset;
+       upl_size_t              xfer_size;
+       upl_t                   upl = NULL;
+       unsigned int            entry;
+       wpl_array_t             lite_list = NULL;
+       int                     no_zero_fill = FALSE;
+       u_int32_t               psize;
+       kern_return_t           ret;
+       vm_prot_t               prot;
+       struct vm_object_fault_info fault_info;
+       struct  dw              dw_array[DELAYED_WORK_LIMIT];
+       struct  dw              *dwp;
+       int                     dw_count;
+       int                     dw_index;
+
+       if (cntrl_flags & ~UPL_VALID_FLAGS) {
+               /*
+                * For forward compatibility's sake,
+                * reject any unknown flag.
+                */
+               return KERN_INVALID_VALUE;
+       }
+       if (vm_lopage_needed == FALSE)
+               cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
+
+       if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
+               if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
+                       return KERN_INVALID_VALUE;
+
+               if (object->phys_contiguous) {
+                       if ((offset + object->shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
+                               return KERN_INVALID_ADDRESS;
+             
+                       if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
+                               return KERN_INVALID_ADDRESS;
+               }
+       }
+
+       if (cntrl_flags & UPL_ENCRYPT) {
+               /*
+                * ENCRYPTED SWAP:
+                * The paging path doesn't use this interface,
+                * so we don't support the UPL_ENCRYPT flag
+                * here.  We won't encrypt the pages.
+                */
+               assert(! (cntrl_flags & UPL_ENCRYPT));
+       }
+       if (cntrl_flags & UPL_NOZEROFILL)
+               no_zero_fill = TRUE;
+
+       if (cntrl_flags & UPL_COPYOUT_FROM)
+               prot = VM_PROT_READ;
+       else
+               prot = VM_PROT_READ | VM_PROT_WRITE;
+
+       if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
+               size = MAX_UPL_SIZE * PAGE_SIZE;
+
+       if (cntrl_flags & UPL_SET_INTERNAL) {
+               if (page_list_count != NULL)
+                       *page_list_count = MAX_UPL_SIZE;
+       }
+       if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
+           ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
+               return KERN_INVALID_ARGUMENT;
+
+       if ((!object->internal) && (object->paging_offset != 0))
+               panic("vm_object_iopl_request: external object with non-zero paging offset\n");
+
+
+       if (object->phys_contiguous)
+               psize = PAGE_SIZE;
+       else
+               psize = size;
+
+       if (cntrl_flags & UPL_SET_INTERNAL) {
+               upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+
+               user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+               lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
+                                          ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
+               if (size == 0) {
+                       user_page_list = NULL;
+                       lite_list = NULL;
+               }
+       } else {
+               upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+
+               lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+               if (size == 0) {
+                       lite_list = NULL;
+               }
+       }
+       if (user_page_list)
+               user_page_list[0].device = FALSE;
+       *upl_ptr = upl;
+
+       upl->map_object = object;
+       upl->size = size;
+
+       if (object == kernel_object &&
+           !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
+               upl->flags |= UPL_KERNEL_OBJECT;
+#if UPL_DEBUG
+               vm_object_lock(object);
+#else
+               vm_object_lock_shared(object);
+#endif
+       } else {
+               vm_object_lock(object);
+               vm_object_activity_begin(object);
+       }
+       /*
+        * paging in progress also protects the paging_offset
+        */
+       upl->offset = offset + object->paging_offset;
+
+       if (cntrl_flags & UPL_BLOCK_ACCESS) {
+               /*
+                * The user requested that access to the pages in this UPL
+                * be blocked until the UPL is committed or aborted.
+                */
+               upl->flags |= UPL_ACCESS_BLOCKED;
+       }
+
+       if (object->phys_contiguous) {
+#if UPL_DEBUG
+               queue_enter(&object->uplq, upl, upl_t, uplq);
+#endif /* UPL_DEBUG */
+
+               if (upl->flags & UPL_ACCESS_BLOCKED) {
+                       assert(!object->blocked_access);
+                       object->blocked_access = TRUE;
+               }
+
+               vm_object_unlock(object);
+
+               /*
+                * don't need any shadow mappings for this one
+                * since it is already I/O memory
+                */
+               upl->flags |= UPL_DEVICE_MEMORY;
+
+               upl->highest_page = (ppnum_t) ((offset + object->shadow_offset + size - 1)>>PAGE_SHIFT);
+
+               if (user_page_list) {
+                       user_page_list[0].phys_addr = (ppnum_t) ((offset + object->shadow_offset)>>PAGE_SHIFT);
+                       user_page_list[0].device = TRUE;
+               }
+               if (page_list_count != NULL) {
+                       if (upl->flags & UPL_INTERNAL)
+                               *page_list_count = 0;
+                       else
+                               *page_list_count = 1;
+               }
+               return KERN_SUCCESS;
+       }
+       if (object != kernel_object) {
+               /*
+                * Protect user space from future COW operations
+                */
+               object->true_share = TRUE;
+
+               if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+                       object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+       }
+
+#if UPL_DEBUG
+       queue_enter(&object->uplq, upl, upl_t, uplq);
+#endif /* UPL_DEBUG */
+
+       if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
+           object->copy != VM_OBJECT_NULL) {
+               /*
+                * Honor copy-on-write obligations
+                *
+                * The caller is gathering these pages and
+                * might modify their contents.  We need to
+                * make sure that the copy object has its own
+                * private copies of these pages before we let
+                * the caller modify them.
+                *
+                * NOTE: someone else could map the original object
+                * after we've done this copy-on-write here, and they
+                * could then see an inconsistent picture of the memory
+                * while it's being modified via the UPL.  To prevent this,
+                * we would have to block access to these pages until the
+                * UPL is released.  We could use the UPL_BLOCK_ACCESS
+                * code path for that...
+                */
+               vm_object_update(object,
+                                offset,
+                                size,
+                                NULL,
+                                NULL,
+                                FALSE, /* should_return */
+                                MEMORY_OBJECT_COPY_SYNC,
+                                VM_PROT_NO_CHANGE);
+#if DEVELOPMENT || DEBUG
+               iopl_cow++;
+               iopl_cow_pages += size >> PAGE_SHIFT;
+#endif
+       }
+
+
+       entry = 0;
+
+       xfer_size = size;
+       dst_offset = offset;
+
+       fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
+       fault_info.user_tag  = 0;
+       fault_info.lo_offset = offset;
+       fault_info.hi_offset = offset + xfer_size;
+       fault_info.no_cache  = FALSE;
+       fault_info.stealth = FALSE;
+       fault_info.mark_zf_absent = TRUE;
+
+       dwp = &dw_array[0];
+       dw_count = 0;
+
+       while (xfer_size) {
+               vm_fault_return_t       result;
+               unsigned int            pg_num;
+
+               dwp->dw_mask = 0;
+
+               dst_page = vm_page_lookup(object, dst_offset);
+
+               /*
+                * ENCRYPTED SWAP:
+                * If the page is encrypted, we need to decrypt it,
+                * so force a soft page fault.
+                */
+               if (dst_page == VM_PAGE_NULL ||
+                   dst_page->busy ||
+                   dst_page->encrypted ||
+                   dst_page->error || 
+                   dst_page->restart ||
+                   dst_page->absent ||
+                   dst_page->fictitious) {
+
+                  if (object == kernel_object)
+                          panic("vm_object_iopl_request: missing/bad page in kernel object\n");
+
+                  do {
+                       vm_page_t       top_page;
+                       kern_return_t   error_code;
+                       int             interruptible;
+
+                       if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
+                               interruptible = THREAD_ABORTSAFE;
+                       else
+                               interruptible = THREAD_UNINT;
+
+                       fault_info.interruptible = interruptible;
+                       fault_info.cluster_size = xfer_size;
+
+                       vm_object_paging_begin(object);
+
+                       result = vm_fault_page(object, dst_offset,
+                                              prot | VM_PROT_WRITE, FALSE, 
+                                              &prot, &dst_page, &top_page,
+                                              (int *)0,
+                                              &error_code, no_zero_fill,
+                                              FALSE, &fault_info);
+
+                       switch (result) {
+
+                       case VM_FAULT_SUCCESS:
+
+                               if ( !dst_page->absent) {
+                                       PAGE_WAKEUP_DONE(dst_page);
+                               } else {
+                                       /*
+                                        * we only get back an absent page if we
+                                        * requested that it not be zero-filled
+                                        * because we are about to fill it via I/O
+                                        * 
+                                        * absent pages should be left BUSY
+                                        * to prevent them from being faulted
+                                        * into an address space before we've
+                                        * had a chance to complete the I/O on
+                                        * them since they may contain info that
+                                        * shouldn't be seen by the faulting task
+                                        */
+                               }
+                               /*
+                                *      Release paging references and
+                                *      top-level placeholder page, if any.
+                                */
+                               if (top_page != VM_PAGE_NULL) {
+                                       vm_object_t local_object;
+
+                                       local_object = top_page->object;
+
+                                       if (top_page->object != dst_page->object) {
+                                               vm_object_lock(local_object);
+                                               VM_PAGE_FREE(top_page);
+                                               vm_object_paging_end(local_object);
+                                               vm_object_unlock(local_object);
+                                       } else {
+                                               VM_PAGE_FREE(top_page);
+                                               vm_object_paging_end(local_object);
+                                       }
+                               }
+                               vm_object_paging_end(object);
+                               break;
+                       
+                       case VM_FAULT_RETRY:
+                               vm_object_lock(object);
+                               break;
+
+                       case VM_FAULT_FICTITIOUS_SHORTAGE:
+                               vm_page_more_fictitious();
+
+                               vm_object_lock(object);
+                               break;
+
+                       case VM_FAULT_MEMORY_SHORTAGE:
+                               if (vm_page_wait(interruptible)) {
+                                       vm_object_lock(object);
+                                       break;
+                               }
+                               /* fall thru */
+
+                       case VM_FAULT_INTERRUPTED:
+                               error_code = MACH_SEND_INTERRUPTED;
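+                               /* fall thru */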
+                       case VM_FAULT_MEMORY_ERROR:
+                       memory_error:
+                               ret = (error_code ? error_code: KERN_MEMORY_ERROR);
+
+                               vm_object_lock(object);
+                               goto return_err;
+
+                       case VM_FAULT_SUCCESS_NO_VM_PAGE:
+                               /* success but no page: fail */
+                               vm_object_paging_end(object);
+                               vm_object_unlock(object);
+                               goto memory_error;
+
+                       default:
+                               panic("vm_object_iopl_request: unexpected error"
+                                     " 0x%x from vm_fault_page()\n", result);
+                       }
+                  } while (result != VM_FAULT_SUCCESS);
+
+               }
+
+               if (upl->flags & UPL_KERNEL_OBJECT)
+                       goto record_phys_addr;
+
+               if (dst_page->cleaning) {
+                       /*
+                        * Someone else is cleaning this page in place.
+                        * In theory, we should be able to proceed and use this
+                        * page, but they'll probably end up clearing the "busy"
+                        * bit on it in upl_commit_range().  Since they didn't set
+                        * that bit themselves, they would clear our "busy" bit
+                        * and open us to race conditions.
+                        * We'd better wait for the cleaning to complete and
+                        * then try again.
+                        */
+                       vm_object_iopl_request_sleep_for_cleaning++;
+                       PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+                       continue;
+               }
+               if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
+                    dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
+                       vm_page_t       low_page;
+                       int             refmod;
+
+                       /*
+                        * support devices that can't DMA above 32 bits
+                        * by substituting pages from a pool of low-address
+                        * memory for any pages we find above the 4G mark.
+                        * We can't substitute if the page is already wired, because
+                        * we don't know whether that physical address has been
+                        * handed out to some other 64-bit-capable DMA device to use.
+                        */
+                       if (VM_PAGE_WIRED(dst_page)) {
+                               ret = KERN_PROTECTION_FAILURE;
+                               goto return_err;
+                       }
+                       low_page = vm_page_grablo();
+
+                       if (low_page == VM_PAGE_NULL) {
+                               ret = KERN_RESOURCE_SHORTAGE;
+                               goto return_err;
+                       }
+                       /*
+                        * from here until the vm_page_replace completes
+                        * we mustn't drop the object lock... we don't
+                        * want anyone refaulting this page in and using
+                        * it after we disconnect it... we want the fault
+                        * to find the new page being substituted.
+                        */
+                       if (dst_page->pmapped)
+                               refmod = pmap_disconnect(dst_page->phys_page);
+                       else
+                               refmod = 0;
+
+                       if ( !dst_page->absent)
+                               vm_page_copy(dst_page, low_page);
+                 
+                       low_page->reference = dst_page->reference;
+                       low_page->dirty     = dst_page->dirty;
+                       low_page->absent    = dst_page->absent;
+
+                       if (refmod & VM_MEM_REFERENCED)
+                               low_page->reference = TRUE;
+                       if (refmod & VM_MEM_MODIFIED)
+                               low_page->dirty = TRUE;
+
+                       vm_page_replace(low_page, object, dst_offset);
+
+                       dst_page = low_page;
+                       /*
+                        * vm_page_grablo returned the page marked
+                        * BUSY... we don't need a PAGE_WAKEUP_DONE
+                        * here, because we've never dropped the object lock
+                        */
+                       if ( !dst_page->absent)
+                               dst_page->busy = FALSE;
+               }
+               if ( !dst_page->busy)
+                       dwp->dw_mask |= DW_vm_page_wire;
+
+               if (cntrl_flags & UPL_BLOCK_ACCESS) {
+                       /*
+                        * Mark the page "busy" to block any future page fault
+                        * on this page.  We'll also remove the mapping
+                        * of all these pages before leaving this routine.
+                        */
+                       assert(!dst_page->fictitious);
+                       dst_page->busy = TRUE;
+               }
+               /*
+                * expect the page to be used
+                * page queues lock must be held to set 'reference'
+                */
+               dwp->dw_mask |= DW_set_reference;
+
+               if (!(cntrl_flags & UPL_COPYOUT_FROM))
+                       dst_page->dirty = TRUE;
+record_phys_addr:
+               if (dst_page->busy)
+                       upl->flags |= UPL_HAS_BUSY;
+
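+               /*
+                * mark this page in the UPL's lite map: one bit per page,
+                * 32 pages per 32-bit word (e.g. page 40 of the UPL lives
+                * in word 1, bit 8)
+                */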
+               pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+               assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+               lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+
+               if (dst_page->phys_page > upl->highest_page)
+                       upl->highest_page = dst_page->phys_page;
+
+               if (user_page_list) {
+                       user_page_list[entry].phys_addr = dst_page->phys_page;
+                       user_page_list[entry].pageout   = dst_page->pageout;
+                       user_page_list[entry].absent    = dst_page->absent;
+                       user_page_list[entry].dirty     = dst_page->dirty;
+                       user_page_list[entry].precious  = dst_page->precious;
+                       user_page_list[entry].device    = FALSE;
+                       if (dst_page->clustered == TRUE)
+                               user_page_list[entry].speculative = dst_page->speculative;
+                       else
+                               user_page_list[entry].speculative = FALSE;
+                       user_page_list[entry].cs_validated = dst_page->cs_validated;
+                       user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+               }
+               if (object != kernel_object) {
+                       /*
+                        * someone is explicitly grabbing this page...
+                        * update clustered and speculative state
+                        * 
+                        */
+                       VM_PAGE_CONSUME_CLUSTERED(dst_page);
+               }
+               entry++;
+               dst_offset += PAGE_SIZE_64;
+               xfer_size -= PAGE_SIZE;
+
+               if (dwp->dw_mask) {
+                       if (dst_page->busy == FALSE) {
+                               /*
+                                * dw_do_work may need to drop the object lock
+                                * if it does, we need the pages it's looking at to
+                                * be held stable via the busy bit.
+                                */
+                               dst_page->busy = TRUE;
+                               dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+                       }
+                       dwp->dw_m = dst_page;
+                       dwp++;
+                       dw_count++;
+
+                       if (dw_count >= DELAYED_WORK_LIMIT) {
+                               dw_do_work(object, &dw_array[0], dw_count);
+                               
+                               dwp = &dw_array[0];
+                               dw_count = 0;
+                       }
+               }
+       }
+       if (dw_count)
+               dw_do_work(object, &dw_array[0], dw_count);
+
+       if (page_list_count != NULL) {
+               if (upl->flags & UPL_INTERNAL)
+                       *page_list_count = 0;
+               else if (*page_list_count > entry)
+                       *page_list_count = entry;
+       }
+       vm_object_unlock(object);
+
+       if (cntrl_flags & UPL_BLOCK_ACCESS) {
+               /*
+                * We've marked all the pages "busy" so that future
+                * page faults will block.
+                * Now remove the mapping for these pages, so that they
+                * can't be accessed without causing a page fault.
+                */
+               vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
+                                      PMAP_NULL, 0, VM_PROT_NONE);
+               assert(!object->blocked_access);
+               object->blocked_access = TRUE;
+       }
+       return KERN_SUCCESS;
+
+return_err:
+       dw_index = 0;
+
+       for (; offset < dst_offset; offset += PAGE_SIZE) {
+               boolean_t need_unwire;
+
+               dst_page = vm_page_lookup(object, offset);
+
+               if (dst_page == VM_PAGE_NULL)
+                       panic("vm_object_iopl_request: Wired page missing. \n");
+
+               /*
+                * if we've already processed this page in an earlier 
+                * dw_do_work, we need to undo the wiring... we will
+                * leave the dirty and reference bits on if they
+                * were set, since we don't have a good way of knowing
+                * what the previous state was and we won't get here
+                * under any normal circumstances...  we will always
+                * clear BUSY and wakeup any waiters via vm_page_free
+                * or PAGE_WAKEUP_DONE
+                */
+               need_unwire = TRUE;
+
+               if (dw_count) {
+                       if (dw_array[dw_index].dw_m == dst_page) {
+                               /*
+                                * still in the deferred work list
+                                * which means we haven't yet called
+                                * vm_page_wire on this page
+                                */
+                               need_unwire = FALSE;
+
+                               dw_index++;
+                               dw_count--;
+                       }
+               }
+               vm_page_lock_queues();
+
+               if (dst_page->absent) {
+                       vm_page_free(dst_page);
+
+                       need_unwire = FALSE;
+               } else {
+                       if (need_unwire == TRUE)
+                               vm_page_unwire(dst_page, TRUE);
+
+                       PAGE_WAKEUP_DONE(dst_page);
+               }       
+               vm_page_unlock_queues();
+
+               if (need_unwire == TRUE)
+                       VM_STAT_INCR(reactivations);
+       }
+#if UPL_DEBUG
+       upl->upl_state = 2;
+#endif
+       if (! (upl->flags & UPL_KERNEL_OBJECT)) {
+               vm_object_activity_end(object);
+       }
+       vm_object_unlock(object);
+       upl_destroy(upl);
+
+       return ret;
+}
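+
+/*
+ * Illustrative sketch (not part of this change): wiring a range of an
+ * object for device I/O and releasing it afterwards.  The "object",
+ * "offset" and "size" names are assumptions for the example.
+ *
+ *	upl_t			upl;
+ *	upl_page_info_t		*pl;
+ *	unsigned int		count = MAX_UPL_SIZE;
+ *
+ *	if (vm_object_iopl_request(object, offset, size, &upl, NULL, &count,
+ *				   UPL_SET_INTERNAL | UPL_SET_LITE |
+ *				   UPL_SET_IO_WIRE) == KERN_SUCCESS) {
+ *		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+ *		... drive the I/O using pl[n].phys_addr ...
+ *		upl_commit(upl, pl, count);
+ *		upl_deallocate(upl);
+ *	}
+ */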
+
+kern_return_t
+upl_transpose(
+       upl_t           upl1,
+       upl_t           upl2)
+{
+       kern_return_t           retval;
+       boolean_t               upls_locked;
+       vm_object_t             object1, object2;
+
+       if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2  || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR)  || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       
+       upls_locked = FALSE;
+
+       /*
+        * Since we need to lock both UPLs at the same time,
+        * avoid deadlocks by always taking locks in the same order.
+        */
+       if (upl1 < upl2) {
+               upl_lock(upl1);
+               upl_lock(upl2);
+       } else {
+               upl_lock(upl2);
+               upl_lock(upl1);
+       }
+       upls_locked = TRUE;     /* the UPLs will need to be unlocked */
+
+       object1 = upl1->map_object;
+       object2 = upl2->map_object;
+
+       if (upl1->offset != 0 || upl2->offset != 0 ||
+           upl1->size != upl2->size) {
+               /*
+                * We deal only with full objects, not subsets.
+                * That's because we exchange the entire backing store info
+                * for the objects: pager, resident pages, etc...  We can't do
+                * only part of it.
+                */
+               retval = KERN_INVALID_VALUE;
+               goto done;
+       }
+
+       /*
+        * Transpose the VM objects' backing store.
+        */
+       retval = vm_object_transpose(object1, object2,
+                                    (vm_object_size_t) upl1->size);
+
+       if (retval == KERN_SUCCESS) {
+               /*
+                * Make each UPL point to the correct VM object, i.e. the
+                * object holding the pages that the UPL refers to...
+                */
+#if UPL_DEBUG
+               queue_remove(&object1->uplq, upl1, upl_t, uplq);
+               queue_remove(&object2->uplq, upl2, upl_t, uplq);
+#endif
+               upl1->map_object = object2;
+               upl2->map_object = object1;
+#if UPL_DEBUG
+               queue_enter(&object1->uplq, upl2, upl_t, uplq);
+               queue_enter(&object2->uplq, upl1, upl_t, uplq);
+#endif
+       }
+
+done:
+       /*
+        * Cleanup.
+        */
+       if (upls_locked) {
+               upl_unlock(upl1);
+               upl_unlock(upl2);
+               upls_locked = FALSE;
+       }
+
+       return retval;
+}
+
+/*
+ * ENCRYPTED SWAP:
+ *
+ * Rationale:  the user might have some encrypted data on disk (via
+ * FileVault or any other mechanism).  That data is then decrypted in
+ * memory, which is safe as long as the machine is secure.  But that
+ * decrypted data in memory could be paged out to disk by the default
+ * pager.  The data would then be stored on disk in the clear (not encrypted)
+ * and it could be accessed by anyone who gets physical access to the
+ * disk (if the laptop or the disk gets stolen for example).  This weakens
+ * the security offered by FileVault.
+ *
+ * Solution:  the default pager will optionally request that all the
+ * pages it gathers for pageout be encrypted, via the UPL interfaces,
+ * before it sends this UPL to disk via the vnode_pageout() path.
+ * 
+ * Notes:
+ * 
+ * To avoid disrupting the VM LRU algorithms, we want to keep the
+ * clean-in-place mechanisms, which allow us to send some extra pages to 
+ * swap (clustering) without actually removing them from the user's
+ * address space.  We don't want the user to unknowingly access encrypted
+ * data, so we have to actually remove the encrypted pages from the page
+ * table.  When the user accesses the data, the hardware will fail to
+ * locate the virtual page in its page table and will trigger a page
+ * fault.  We can then decrypt the page and enter it in the page table
+ * again.  Whenever we allow the user to access the contents of a page,
+ * we have to make sure it's not encrypted.
+ *
+ * 
+ */
+/*
+ * ENCRYPTED SWAP:
+ * Reserve of virtual addresses in the kernel address space.
+ * We need to map the physical pages in the kernel, so that we
+ * can call the encryption/decryption routines with a kernel
+ * virtual address.  We keep this pool of pre-allocated kernel
+ * virtual addresses so that we don't have to scan the kernel's
+ * virtual address space each time we need to encrypt or decrypt
+ * a physical page.
+ * It would be nice to be able to encrypt and decrypt in physical
+ * mode but that might not always be more efficient...
+ */
+decl_simple_lock_data(,vm_paging_lock)
+#define VM_PAGING_NUM_PAGES    64
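+/* with 4 KB pages, this reserves 64 * 4 KB = 256 KB of kernel virtual space */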
+vm_map_offset_t vm_paging_base_address = 0;
+boolean_t      vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
+int            vm_paging_max_index = 0;
+int            vm_paging_page_waiter = 0;
+int            vm_paging_page_waiter_total = 0;
+unsigned long  vm_paging_no_kernel_page = 0;
+unsigned long  vm_paging_objects_mapped = 0;
+unsigned long  vm_paging_pages_mapped = 0;
+unsigned long  vm_paging_objects_mapped_slow = 0;
+unsigned long  vm_paging_pages_mapped_slow = 0;
+
+void
+vm_paging_map_init(void)
+{
+       kern_return_t   kr;
+       vm_map_offset_t page_map_offset;
+       vm_map_entry_t  map_entry;
+
+       assert(vm_paging_base_address == 0);
+
+       /*
+        * Initialize our pool of pre-allocated kernel
+        * virtual addresses.
+        */
+       page_map_offset = 0;
+       kr = vm_map_find_space(kernel_map,
+                              &page_map_offset,
+                              VM_PAGING_NUM_PAGES * PAGE_SIZE,
+                              0,
+                              0,
+                              &map_entry);
+       if (kr != KERN_SUCCESS) {
+               panic("vm_paging_map_init: kernel_map full\n");
+       }
+       map_entry->object.vm_object = kernel_object;
+       map_entry->offset = page_map_offset;
+       vm_object_reference(kernel_object);
+       vm_map_unlock(kernel_map);
+
+       assert(vm_paging_base_address == 0);
+       vm_paging_base_address = page_map_offset;
+}
+
+/*
+ * ENCRYPTED SWAP:
+ * vm_paging_map_object:
+ *     Maps part of a VM object's pages in the kernel
+ *     virtual address space, using the pre-allocated
+ *     kernel virtual addresses, if possible.
+ * Context:
+ *     The VM object is locked.  This lock will get
+ *     dropped and re-acquired though, so the caller
+ *     must make sure the VM object is kept alive
+ *     (by holding a VM map that has a reference
+ *     on it, for example, or taking an extra reference).
+ *     The page should also be kept busy to prevent
+ *     it from being reclaimed.
+ */
+kern_return_t
+vm_paging_map_object(
+       vm_map_offset_t         *address,
+       vm_page_t               page,
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_map_size_t           *size,
+       vm_prot_t               protection,
+       boolean_t               can_unlock_object)
+{
+       kern_return_t           kr;
+       vm_map_offset_t         page_map_offset;
+       vm_map_size_t           map_size;
+       vm_object_offset_t      object_offset;
+       int                     i;
+
+       
+       if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
+               assert(page->busy);
+               /*
+                * Use one of the pre-allocated kernel virtual addresses
+                * and just enter the VM page in the kernel address space
+                * at that virtual address.
+                */
+               simple_lock(&vm_paging_lock);
+
+               /*
+                * Try and find an available kernel virtual address
+                * from our pre-allocated pool.
+                */
+               page_map_offset = 0;
+               for (;;) {
+                       for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
+                               if (vm_paging_page_inuse[i] == FALSE) {
+                                       page_map_offset =
+                                               vm_paging_base_address +
+                                               (i * PAGE_SIZE);
+                                       break;
+                               }
+                       }
+                       if (page_map_offset != 0) {
+                               /* found a space to map our page! */
+                               break;
+                       }
+
+                       if (can_unlock_object) {
+                               /*
+                                * If we can afford to unlock the VM object,
+                                * let's take the slow path now...
+                                */
+                               break;
+                       }
+                       /*
+                        * We can't afford to unlock the VM object, so
+                        * let's wait for a space to become available...
+                        */
+                       vm_paging_page_waiter_total++;
+                       vm_paging_page_waiter++;
+                       thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
+                                                      &vm_paging_lock,
+                                                      THREAD_UNINT);
+                       vm_paging_page_waiter--;
+                       /* ... and try again */
+               }
+
+               if (page_map_offset != 0) {
+                       /*
+                        * We found a kernel virtual address;
+                        * map the physical page to that virtual address.
+                        */
+                       if (i > vm_paging_max_index) {
+                               vm_paging_max_index = i;
+                       }
+                       vm_paging_page_inuse[i] = TRUE;
+                       simple_unlock(&vm_paging_lock);
+
+                       if (page->pmapped == FALSE) {
+                               pmap_sync_page_data_phys(page->phys_page);
+                       }
+                       page->pmapped = TRUE;
+
+                       /*
+                        * Keep the VM object locked over the PMAP_ENTER
+                        * and the actual use of the page by the kernel,
+                        * or this pmap mapping might get undone by a 
+                        * vm_object_pmap_protect() call...
+                        */
+                       PMAP_ENTER(kernel_pmap,
+                                  page_map_offset,
+                                  page,
+                                  protection,
+                                  ((int) page->object->wimg_bits &
+                                   VM_WIMG_MASK),
+                                  TRUE);
+                       vm_paging_objects_mapped++;
+                       vm_paging_pages_mapped++; 
+                       *address = page_map_offset;
+
+                       /* all done and mapped, ready to use! */
+                       return KERN_SUCCESS;
+               }
+
+               /*
+                * We ran out of pre-allocated kernel virtual
+                * addresses.  Just map the page in the kernel
+                * the slow and regular way.
+                */
+               vm_paging_no_kernel_page++;
+               simple_unlock(&vm_paging_lock);
+       }
+
+       if (! can_unlock_object) {
+               return KERN_NOT_SUPPORTED;
+       }
+
+       object_offset = vm_object_trunc_page(offset);
+       map_size = vm_map_round_page(*size);
+
+       /*
+        * Try and map the required range of the object
+        * in the kernel_map
+        */
+
+       vm_object_reference_locked(object);     /* for the map entry */
+       vm_object_unlock(object);
+
+       kr = vm_map_enter(kernel_map,
+                         address,
+                         map_size,
+                         0,
+                         VM_FLAGS_ANYWHERE,
+                         object,
+                         object_offset,
+                         FALSE,
+                         protection,
+                         VM_PROT_ALL,
+                         VM_INHERIT_NONE);
+       if (kr != KERN_SUCCESS) {
+               *address = 0;
+               *size = 0;
+               vm_object_deallocate(object);   /* for the map entry */
+               vm_object_lock(object);
+               return kr;
+       }
 
-                               /* eliminate all mappings from the */
-                               /* original object and its prodigy */
-                               
-                               if(dst_page->busy) {
-                                       /*someone else is playing with the */
-                                       /* page.  We will have to wait.    */
-                                       PAGE_ASSERT_WAIT(
-                                               dst_page, THREAD_UNINT);
-                                       vm_object_unlock(object);
-                                       thread_block((void(*)(void))0);
-                                       vm_object_lock(object);
-                                       continue;
-                               }
-                               
-                               vm_page_lock_queues();
-                               pmap_page_protect(dst_page->phys_addr, 
-                                                               VM_PROT_NONE);
-                               dirty = pmap_is_modified(dst_page->phys_addr);
-                               dirty = dirty ? TRUE : dst_page->dirty;
-
-                               vm_pageclean_setup(dst_page, alias_page,
-                                       upl->map_object, size - xfer_size);
-
-                               if(cntrl_flags & UPL_CLEAN_IN_PLACE) {
-                                       /* clean in place for read implies   */
-                                       /* that a write will be done on all  */
-                                       /* the pages that are dirty before   */
-                                       /* a upl commit is done.  The caller */
-                                       /* is obligated to preserve the      */
-                                       /* contents of all pages marked      */
-                                       /* dirty. */
-                                       upl->flags |= UPL_CLEAR_DIRTY;
-                               }
+       *size = map_size;
 
-                               if(!dirty) {
-                                       dst_page->dirty = FALSE;
-                                       dst_page->precious = TRUE;
-                               }
-                                               
-                               if (dst_page->wire_count == 0) {
-                                  /* deny access to the target page while */
-                                  /* it is being worked on */
-                                       dst_page->busy = TRUE;
-                               } else {
-                                       vm_page_wire(dst_page);
-                               }
-                               /* expect the page to be used */
-                               dst_page->reference = TRUE;
-                               dst_page->precious = 
-                                       (cntrl_flags & UPL_PRECIOUS) 
-                                                       ? TRUE : FALSE;
-                               alias_page->absent = FALSE;
-                               alias_page = NULL;
-                               if(user_page_list) {
-                                       user_page_list[entry].phys_addr
-                                               = dst_page->phys_addr;
-                                       user_page_list[entry].dirty =
-                                                       dst_page->dirty;
-                                       user_page_list[entry].pageout =
-                                                       dst_page->pageout;
-                                       user_page_list[entry].absent =
-                                                       dst_page->absent;
-                                       user_page_list[entry].precious =
-                                                       dst_page->precious;
-                               }
-                               vm_page_unlock_queues();
-                       }
-                       entry++;
-                       dst_offset += PAGE_SIZE_64;
-                       xfer_size -= PAGE_SIZE;
+       /*
+        * Enter the mapped pages in the page table now.
+        */
+       vm_object_lock(object);
+       /*
+        * VM object must be kept locked from before PMAP_ENTER()
+        * until after the kernel is done accessing the page(s).
+        * Otherwise, the pmap mappings in the kernel could be
+        * undone by a call to vm_object_pmap_protect().
+        */
+
+       for (page_map_offset = 0;
+            map_size != 0;
+            map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
+               unsigned int    cache_attr;
+
+               page = vm_page_lookup(object, offset + page_map_offset);
+               if (page == VM_PAGE_NULL) {
+                       printf("vm_paging_map_object: no page !?");
+                       vm_object_unlock(object);
+                       kr = vm_map_remove(kernel_map, *address, *size,
+                                          VM_MAP_NO_FLAGS);
+                       assert(kr == KERN_SUCCESS);
+                       *address = 0;
+                       *size = 0;
+                       vm_object_lock(object);
+                       return KERN_MEMORY_ERROR;
                }
+               if (page->pmapped == FALSE) {
+                       pmap_sync_page_data_phys(page->phys_page);
+               }
+               page->pmapped = TRUE;
+               cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
+
+               //assert(pmap_verify_free(page->phys_page));
+               PMAP_ENTER(kernel_pmap,
+                          *address + page_map_offset,
+                          page,
+                          protection,
+                          cache_attr,
+                          TRUE);
        }
-       if(alias_page != NULL) {
-               vm_page_lock_queues();
-               vm_page_free(alias_page);
-               vm_page_unlock_queues();
-       }
-       if(do_m_lock) {
-          vm_prot_t    access_required;
-          /* call back all associated pages from other users of the pager */
-          /* all future updates will be on data which is based on the     */
-          /* changes we are going to make here. Note: it is assumed that  */
-          /* we already hold copies of the data so we will not be seeing  */
-          /* an avalanche of incoming data from the pager */
-          access_required = (cntrl_flags & UPL_COPYOUT_FROM) 
-                                       ? VM_PROT_READ : VM_PROT_WRITE;
-          while (TRUE) {
-               kern_return_t   rc;
-               thread_t        thread;
-
-               if(!object->pager_ready) {
-                  thread = current_thread();
-                  vm_object_assert_wait(object, 
-                               VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
-                  vm_object_unlock(object);
-                  thread_block((void (*)(void))0);
-                  if (thread->wait_result !=  THREAD_AWAKENED) {
-                     return(KERN_FAILURE);
-                  }
-                  vm_object_lock(object);
-                  continue;
+                          
+       vm_paging_objects_mapped_slow++;
+       vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
+
+       return KERN_SUCCESS;
+}
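+
+/*
+ * Illustrative sketch (not part of this change): mapping a single busy
+ * page into the kernel, accessing it, and returning the virtual address
+ * to the pool.  The "page", "object" and "offset" names are assumptions.
+ *
+ *	vm_map_offset_t	kernel_vaddr;
+ *	vm_map_size_t	map_size = PAGE_SIZE;
+ *
+ *	assert(page->busy);	(the caller keeps the page stable)
+ *	if (vm_paging_map_object(&kernel_vaddr, page, object, offset,
+ *				 &map_size, VM_PROT_READ | VM_PROT_WRITE,
+ *				 FALSE) == KERN_SUCCESS) {
+ *		... access the page's contents via kernel_vaddr ...
+ *		vm_paging_unmap_object(object, kernel_vaddr,
+ *				       kernel_vaddr + PAGE_SIZE);
+ *	}
+ */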
+
+/*
+ * ENCRYPTED SWAP:
+ * vm_paging_unmap_object:
+ *     Unmaps part of a VM object's pages from the kernel
+ *     virtual address space.
+ * Context:
+ *     The VM object is locked.  This lock will get
+ *     dropped and re-acquired though.
+ */
+void
+vm_paging_unmap_object(
+       vm_object_t     object,
+       vm_map_offset_t start,
+       vm_map_offset_t end)
+{
+       kern_return_t   kr;
+       int             i;
+
+       if ((vm_paging_base_address == 0) ||
+           (start < vm_paging_base_address) ||
+           (end > (vm_paging_base_address
+                    + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
+               /*
+                * We didn't use our pre-allocated pool of
+                * kernel virtual addresses.  Deallocate the
+                * virtual memory.
+                */
+               if (object != VM_OBJECT_NULL) {
+                       vm_object_unlock(object);
+               }
+               kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
+               if (object != VM_OBJECT_NULL) {
+                       vm_object_lock(object);
                }
+               assert(kr == KERN_SUCCESS);
+       } else {
+               /*
+                * We used a kernel virtual address from our
+                * pre-allocated pool.  Put it back in the pool
+                * for next time.
+                */
+               assert(end - start == PAGE_SIZE);
+               i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
+               assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
 
-               vm_object_unlock(object);
+               /* undo the pmap mapping */
+               pmap_remove(kernel_pmap, start, end);
 
-               if (rc = memory_object_data_unlock(
-                       object->pager,
-                       object->pager_request,
-                       dst_offset + object->paging_offset,
-                       size,
-                       access_required)) {
-                       if (rc == MACH_SEND_INTERRUPTED) 
-                               continue;
-                       else
-                               return KERN_FAILURE;
+               simple_lock(&vm_paging_lock);
+               vm_paging_page_inuse[i] = FALSE;
+               if (vm_paging_page_waiter) {
+                       thread_wakeup(&vm_paging_page_waiter);
                }
-               break;
-               
-          }
-          /* lets wait on the last page requested */
-          /* NOTE: we will have to update lock completed routine to signal */
-          if(dst_page != VM_PAGE_NULL && 
-               (access_required & dst_page->page_lock) != access_required) {
-               PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
-               thread_block((void (*)(void))0);
-               vm_object_lock(object);
-          }
+               simple_unlock(&vm_paging_lock);
        }
-       vm_object_unlock(object);
-       return KERN_SUCCESS;
 }
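+
+/*
+ * Illustrative sketch (editorial, not part of this change): the intended
+ * pairing of vm_paging_map_object() and vm_paging_unmap_object(), as
+ * exercised by vm_page_encrypt() below.  Variable names are illustrative.
+ *
+ *     vm_map_offset_t kernel_mapping_offset = 0;
+ *     vm_map_size_t   kernel_mapping_size = PAGE_SIZE;
+ *
+ *     kr = vm_paging_map_object(&kernel_mapping_offset,
+ *                               page, page->object, page->offset,
+ *                               &kernel_mapping_size,
+ *                               VM_PROT_READ | VM_PROT_WRITE, FALSE);
+ *     if (kr == KERN_SUCCESS) {
+ *             ... access the page contents via kernel_mapping_offset ...
+ *             vm_paging_unmap_object(page->object,
+ *                                    kernel_mapping_offset,
+ *                                    kernel_mapping_offset + kernel_mapping_size);
+ *     }
+ */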
 
+#if CRYPTO
+/*
+ * Encryption data.
+ * "iv" is the "initial vector".  Ideally, we want to
+ * have a different one for each page we encrypt, so that
+ * crackers can't find encryption patterns too easily.
+ */
+#define SWAP_CRYPT_AES_KEY_SIZE        128     /* XXX 192 and 256 don't work! */
+boolean_t              swap_crypt_ctx_initialized = FALSE;
+aes_32t                swap_crypt_key[8]; /* big enough for a 256 key */
+aes_ctx                        swap_crypt_ctx;
+const unsigned char    swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };
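+
+/*
+ * Editorial note (a sketch, assuming the key size is given in bits as the
+ * AES context setup below suggests): a 128-bit key only consumes the first
+ * SWAP_CRYPT_AES_KEY_SIZE / 32 == 4 of the 8 32-bit words in
+ * swap_crypt_key[]; the array is sized for an eventual 256-bit key.
+ *
+ *     assert((SWAP_CRYPT_AES_KEY_SIZE / 32) <=
+ *            (int) (sizeof (swap_crypt_key) / sizeof (swap_crypt_key[0])));
+ */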
+
+#if DEBUG
+boolean_t              swap_crypt_ctx_tested = FALSE;
+unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
+unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
+unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
+#endif /* DEBUG */
 
-kern_return_t
-upl_system_list_request(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       vm_size_t               size,
-       vm_size_t               super_cluster,
-       upl_t                   *upl,
-       upl_page_info_t         **user_page_list_ptr,
-       int                     page_list_count,
-       int                     cntrl_flags)
+/*
+ * Initialize the encryption context: key and key size.
+ */
+void swap_crypt_ctx_initialize(void); /* forward */
+void
+swap_crypt_ctx_initialize(void)
 {
-       if(object->paging_offset > offset)
-               return KERN_FAILURE;
-       offset = offset - object->paging_offset;
+       unsigned int    i;
 
-/* turns off super cluster exercised by the default_pager */
-/*
-super_cluster = size;
-*/
-       if ((super_cluster > size) && 
-                       (vm_page_free_count > vm_page_free_reserved)) {
+       /*
+        * No need for locking to protect swap_crypt_ctx_initialized
+        * because the first use of encryption will come from the
+        * pageout thread (we won't pagein before there's been a pageout)
+        * and there's only one pageout thread.
+        */
+       if (swap_crypt_ctx_initialized == FALSE) {
+               for (i = 0;
+                    i < (sizeof (swap_crypt_key) /
+                         sizeof (swap_crypt_key[0]));
+                    i++) {
+                       swap_crypt_key[i] = random();
+               }
+               aes_encrypt_key((const unsigned char *) swap_crypt_key,
+                               SWAP_CRYPT_AES_KEY_SIZE,
+                               &swap_crypt_ctx.encrypt);
+               aes_decrypt_key((const unsigned char *) swap_crypt_key,
+                               SWAP_CRYPT_AES_KEY_SIZE,
+                               &swap_crypt_ctx.decrypt);
+               swap_crypt_ctx_initialized = TRUE;
+       }
 
-               vm_object_offset_t      base_offset;
-               vm_size_t               super_size;
-
-               base_offset = (offset &  
-                       ~((vm_object_offset_t) super_cluster - 1));
-               super_size = (offset+size) > (base_offset + super_cluster) ?
-                               super_cluster<<1 : super_cluster;
-               super_size = ((base_offset + super_size) > object->size) ? 
-                               (object->size - base_offset) : super_size;
-               if(offset > (base_offset + super_size))
-                  panic("upl_system_list_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", offset, base_offset, super_size, super_cluster, size, object->paging_offset);
-               /* apparently there is a case where the vm requests a */
-               /* page to be written out who's offset is beyond the  */
-               /* object size */
-               if((offset + size) > (base_offset + super_size))
-                  super_size = (offset + size) - base_offset;
+#if DEBUG
+       /*
+        * Validate the encryption algorithms.
+        */
+       if (swap_crypt_ctx_tested == FALSE) {
+               /* initialize */
+               for (i = 0; i < 4096; i++) {
+                       swap_crypt_test_page_ref[i] = (char) i;
+               }
+               /* encrypt */
+               aes_encrypt_cbc(swap_crypt_test_page_ref,
+                               swap_crypt_null_iv,
+                               PAGE_SIZE / AES_BLOCK_SIZE,
+                               swap_crypt_test_page_encrypt,
+                               &swap_crypt_ctx.encrypt);
+               /* decrypt */
+               aes_decrypt_cbc(swap_crypt_test_page_encrypt,
+                               swap_crypt_null_iv,
+                               PAGE_SIZE / AES_BLOCK_SIZE,
+                               swap_crypt_test_page_decrypt,
+                               &swap_crypt_ctx.decrypt);
+               /* compare result with original */
+               for (i = 0; i < 4096; i ++) {
+                       if (swap_crypt_test_page_decrypt[i] !=
+                           swap_crypt_test_page_ref[i]) {
+                               panic("encryption test failed");
+                       }
+               }
 
-               offset = base_offset;
-               size = super_size;
+               /* encrypt again */
+               aes_encrypt_cbc(swap_crypt_test_page_decrypt,
+                               swap_crypt_null_iv,
+                               PAGE_SIZE / AES_BLOCK_SIZE,
+                               swap_crypt_test_page_decrypt,
+                               &swap_crypt_ctx.encrypt);
+               /* decrypt in place */
+               aes_decrypt_cbc(swap_crypt_test_page_decrypt,
+                               swap_crypt_null_iv,
+                               PAGE_SIZE / AES_BLOCK_SIZE,
+                               swap_crypt_test_page_decrypt,
+                               &swap_crypt_ctx.decrypt);
+               for (i = 0; i < 4096; i ++) {
+                       if (swap_crypt_test_page_decrypt[i] !=
+                           swap_crypt_test_page_ref[i]) {
+                               panic("in place encryption test failed");
+                       }
+               }
+
+               swap_crypt_ctx_tested = TRUE;
        }
-       vm_fault_list_request(object, offset, size, upl, user_page_list_ptr,
-                                               page_list_count, cntrl_flags);
+#endif /* DEBUG */
 }
 
-
-kern_return_t
-uc_upl_map(
-       vm_map_t        map, 
-       upl_t           upl, 
-       vm_offset_t     *dst_addr)
+/*
+ * ENCRYPTED SWAP:
+ * vm_page_encrypt:
+ *     Encrypt the given page, for secure paging.
+ *     The page might already be mapped at kernel virtual
+ *     address "kernel_mapping_offset".  Otherwise, we need
+ *     to map it.
+ * 
+ * Context:
+ *     The page's object is locked, but this lock will be released
+ *     and re-acquired.
+ *     The page is busy and not accessible by users (not entered in any pmap).
+ */
+void
+vm_page_encrypt(
+       vm_page_t       page,
+       vm_map_offset_t kernel_mapping_offset)
 {
-       vm_size_t               size;
-       vm_object_offset_t      offset;
-       vm_offset_t             addr;
-       vm_page_t               m;
        kern_return_t           kr;
+       vm_map_size_t           kernel_mapping_size;
+       vm_offset_t             kernel_vaddr;
+       union {
+               unsigned char   aes_iv[AES_BLOCK_SIZE];
+               struct {
+                       memory_object_t         pager_object;
+                       vm_object_offset_t      paging_offset;
+               } vm;
+       } encrypt_iv;
+
+       if (! vm_pages_encrypted) {
+               vm_pages_encrypted = TRUE;
+       }
 
-       /* check to see if already mapped */
-       if(UPL_PAGE_LIST_MAPPED & upl->flags)
-               return KERN_FAILURE;
-
-       offset = 0;  /* Always map the entire object */
-       size = upl->size;
+       assert(page->busy);
+       assert(page->dirty || page->precious);
        
-       vm_object_lock(upl->map_object);
-       upl->map_object->ref_count++;
-       vm_object_res_reference(upl->map_object);
-       vm_object_unlock(upl->map_object);
+       if (page->encrypted) {
+               /*
+                * Already encrypted: no need to do it again.
+                */
+               vm_page_encrypt_already_encrypted_counter++;
+               return;
+       }
+       ASSERT_PAGE_DECRYPTED(page);
+
+       /*
+        * Take a paging-in-progress reference to keep the object
+        * alive even if we have to unlock it (in vm_paging_map_object()
+        * for example)...
+        */
+       vm_object_paging_begin(page->object);
 
-       *dst_addr = 0;
+       if (kernel_mapping_offset == 0) {
+               /*
+                * The page hasn't already been mapped in kernel space
+                * by the caller.  Map it now, so that we can access
+                * its contents and encrypt them.
+                */
+               kernel_mapping_size = PAGE_SIZE;
+               kr = vm_paging_map_object(&kernel_mapping_offset,
+                                         page,
+                                         page->object,
+                                         page->offset,
+                                         &kernel_mapping_size,
+                                         VM_PROT_READ | VM_PROT_WRITE,
+                                         FALSE);
+               if (kr != KERN_SUCCESS) {
+                       panic("vm_page_encrypt: "
+                             "could not map page in kernel: 0x%x\n",
+                             kr);
+               }
+       } else {
+               kernel_mapping_size = 0;
+       }
+       kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
 
+       if (swap_crypt_ctx_initialized == FALSE) {
+               swap_crypt_ctx_initialize();
+       }
+       assert(swap_crypt_ctx_initialized);
 
-       /* NEED A UPL_MAP ALIAS */
-       kr = vm_map_enter(map, dst_addr, size, (vm_offset_t) 0, TRUE,
-               upl->map_object, offset, FALSE,
-               VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+       /*
+        * Prepare an "initial vector" for the encryption.
+        * We use the "pager" and the "paging_offset" for that
+        * page to obfuscate the encrypted data a bit more and
+        * prevent crackers from finding patterns that they could
+        * use to break the key.
+        */
+       bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
+       encrypt_iv.vm.pager_object = page->object->pager;
+       encrypt_iv.vm.paging_offset =
+               page->object->paging_offset + page->offset;
+
+       /* encrypt the "initial vector" */
+       aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
+                       swap_crypt_null_iv,
+                       1,
+                       &encrypt_iv.aes_iv[0],
+                       &swap_crypt_ctx.encrypt);
+                 
+       /*
+        * Encrypt the page.
+        */
+       aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
+                       &encrypt_iv.aes_iv[0],
+                       PAGE_SIZE / AES_BLOCK_SIZE,
+                       (unsigned char *) kernel_vaddr,
+                       &swap_crypt_ctx.encrypt);
 
-       if (kr != KERN_SUCCESS)
-               return(kr);
+       vm_page_encrypt_counter++;
 
-       for(addr=*dst_addr; size > 0; size-=PAGE_SIZE,addr+=PAGE_SIZE) {
-               m = vm_page_lookup(upl->map_object, offset);
-               if(m) {
-                       PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, TRUE);
-               }
-               offset+=PAGE_SIZE_64;
+       /*
+        * Unmap the page from the kernel's address space,
+        * if we had to map it ourselves.  Otherwise, let
+        * the caller undo the mapping if needed.
+        */
+       if (kernel_mapping_size != 0) {
+               vm_paging_unmap_object(page->object,
+                                      kernel_mapping_offset,
+                                      kernel_mapping_offset + kernel_mapping_size);
        }
 
-       upl->flags |= UPL_PAGE_LIST_MAPPED;
-       upl->kaddr = *dst_addr;
-       return KERN_SUCCESS;
+       /*
+        * Clear the "reference" and "modified" bits.
+        * This should clean up any impact the encryption had
+        * on them.
+        * The page was kept busy and disconnected from all pmaps,
+        * so it can't have been referenced or modified from user
+        * space.
+        * The software bits will be reset later after the I/O
+        * has completed (in upl_commit_range()).
+        */
+       pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
+
+       page->encrypted = TRUE;
+
+       vm_object_paging_end(page->object);
 }
-       
 
-kern_return_t
-uc_upl_un_map(
-       vm_map_t        map, 
-       upl_t           upl)
+/*
+ * ENCRYPTED SWAP:
+ * vm_page_decrypt:
+ *     Decrypt the given page.
+ *     The page might already be mapped at kernel virtual
+ *     address "kernel_mapping_offset".  Otherwise, we need
+ *     to map it.
+ *
+ * Context:
+ *     The page's VM object is locked but will be unlocked and relocked.
+ *     The page is busy and not accessible by users (not entered in any pmap).
+ */
+void
+vm_page_decrypt(
+       vm_page_t       page,
+       vm_map_offset_t kernel_mapping_offset)
 {
-       vm_size_t       size;
+       kern_return_t           kr;
+       vm_map_size_t           kernel_mapping_size;
+       vm_offset_t             kernel_vaddr;
+       union {
+               unsigned char   aes_iv[AES_BLOCK_SIZE];
+               struct {
+                       memory_object_t         pager_object;
+                       vm_object_offset_t      paging_offset;
+               } vm;
+       } decrypt_iv;
+
+       assert(page->busy);
+       assert(page->encrypted);
 
-       if(upl->flags & UPL_PAGE_LIST_MAPPED) {
-               size = upl->size;
-               vm_deallocate(map, upl->kaddr, size);
-               upl->flags &= ~UPL_PAGE_LIST_MAPPED;
-               upl->kaddr = (vm_offset_t) 0;
-               return KERN_SUCCESS;
+       /*
+        * Take a paging-in-progress reference to keep the object
+        * alive even if we have to unlock it (in vm_paging_map_object()
+        * for example)...
+        */
+       vm_object_paging_begin(page->object);
+
+       if (kernel_mapping_offset == 0) {
+               /*
+                * The page hasn't already been mapped in kernel space
+                * by the caller.  Map it now, so that we can access
+                * its contents and decrypt them.
+                */
+               kernel_mapping_size = PAGE_SIZE;
+               kr = vm_paging_map_object(&kernel_mapping_offset,
+                                         page,
+                                         page->object,
+                                         page->offset,
+                                         &kernel_mapping_size,
+                                         VM_PROT_READ | VM_PROT_WRITE,
+                                         FALSE);
+               if (kr != KERN_SUCCESS) {
+                       panic("vm_page_decrypt: "
+                             "could not map page in kernel: 0x%x\n",
+                             kr);
+               }
        } else {
-               return KERN_FAILURE;
+               kernel_mapping_size = 0;
        }
-}
+       kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
 
-kern_return_t
-uc_upl_commit_range(
-       upl_t                   upl, 
-       vm_offset_t             offset, 
-       vm_size_t               size,
-       int                     flags,
-       upl_page_info_t          *page_list) 
-{
-       vm_size_t               xfer_size = size;
-       vm_object_t             shadow_object = upl->map_object->shadow;
-       vm_object_t             object = upl->map_object;
-       vm_object_offset_t      target_offset;
-       vm_object_offset_t      page_offset;
-       int                     entry;
+       assert(swap_crypt_ctx_initialized);
 
-       if(upl->flags & UPL_DEVICE_MEMORY) {
-               xfer_size = 0;
-       } else if ((offset + size) > upl->size) {
-               return KERN_FAILURE;
+       /*
+        * Prepare an "initial vector" for the decryption.
+        * It has to be the same as the "initial vector" we
+        * used to encrypt that page.
+        */
+       bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
+       decrypt_iv.vm.pager_object = page->object->pager;
+       decrypt_iv.vm.paging_offset =
+               page->object->paging_offset + page->offset;
+
+       /* encrypt the "initial vector" */
+       aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
+                       swap_crypt_null_iv,
+                       1,
+                       &decrypt_iv.aes_iv[0],
+                       &swap_crypt_ctx.encrypt);
+
+       /*
+        * Decrypt the page.
+        */
+       aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
+                       &decrypt_iv.aes_iv[0],
+                       PAGE_SIZE / AES_BLOCK_SIZE,
+                       (unsigned char *) kernel_vaddr,
+                       &swap_crypt_ctx.decrypt);
+       vm_page_decrypt_counter++;
+
+       /*
+        * Unmap the page from the kernel's address space,
+        * if we had to map it ourselves.  Otherwise, let
+        * the caller undo the mapping if needed.
+        */
+       if (kernel_mapping_size != 0) {
+               vm_paging_unmap_object(page->object,
+                                      kernel_vaddr,
+                                      kernel_vaddr + PAGE_SIZE);
        }
 
-       vm_object_lock(shadow_object);
+       /*
+        * After decryption, the page is actually clean.
+        * It was encrypted as part of paging, which "cleans"
+        * the "dirty" pages.
+        * No one could access it after it was encrypted,
+        * and the decryption doesn't count.
+        */
+       page->dirty = FALSE;
+       assert (page->cs_validated == FALSE);
+       pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+       page->encrypted = FALSE;
 
-       entry = offset/PAGE_SIZE;
-       target_offset = (vm_object_offset_t)offset;
-       while(xfer_size) {
-               vm_page_t       t,m;
-               upl_page_info_t *p;
-
-               if((t = vm_page_lookup(object, target_offset)) != NULL) {
-
-                       t->pageout = FALSE;
-                       page_offset = t->offset;
-                       VM_PAGE_FREE(t);
-                       t = VM_PAGE_NULL;
-                       m = vm_page_lookup(shadow_object, 
-                                       page_offset + object->shadow_offset);
-                       if(m != VM_PAGE_NULL) {
-                          vm_object_paging_end(shadow_object);
-                          vm_page_lock_queues();
-                          if ((upl->flags & UPL_CLEAR_DIRTY) ||
-                                       (flags & UPL_COMMIT_CLEAR_DIRTY)) {
-                               pmap_clear_modify(m->phys_addr);
-                               m->dirty = FALSE;
-                          }
-                          if(page_list) {
-                               p = &(page_list[entry]);
-                               if(p->phys_addr && p->pageout && !m->pageout) {
-                                       m->busy = TRUE;
-                                       m->pageout = TRUE;
-                                       vm_page_wire(m);
-                               } else if (page_list[entry].phys_addr &&
-                                               !p->pageout && m->pageout) {
-                                       m->pageout = FALSE;
-                                       m->absent = FALSE;
-                                       m->overwriting = FALSE;
-                                       vm_page_unwire(m);
-                                       PAGE_WAKEUP_DONE(m);
-                               }
-                               page_list[entry].phys_addr = 0;
-                          }
-                          if(m->laundry) {
-                             vm_page_laundry_count--;
-                             m->laundry = FALSE;
-                             if (vm_page_laundry_count < vm_page_laundry_min) {
-                                vm_page_laundry_min = 0;
-                                thread_wakeup((event_t) 
-                                            &vm_page_laundry_count);
-                             }
-                          }
-                          if(m->pageout) {
-                             m->cleaning = FALSE;
-                             m->pageout = FALSE;
-#if MACH_CLUSTER_STATS
-                             if (m->wanted) vm_pageout_target_collisions++;
-#endif
-                             pmap_page_protect(m->phys_addr, VM_PROT_NONE);
-                             m->dirty = pmap_is_modified(m->phys_addr);
-                             if(m->dirty) {
-                                CLUSTER_STAT(
-                                     vm_pageout_target_page_dirtied++;)
-                                 vm_page_unwire(m);/* reactivates */
-                                 VM_STAT(reactivations++);
-                                 PAGE_WAKEUP_DONE(m);
-                                     } else if (m->prep_pin_count != 0) {
-                                 vm_page_pin_lock();
-                                 if (m->pin_count != 0) {
-                                    /* page is pinned; reactivate */
-                                    CLUSTER_STAT(
-                                               vm_pageout_target_page_pinned++;)
-                                    vm_page_unwire(m);/* reactivates */
-                                    VM_STAT(reactivations++);
-                                    PAGE_WAKEUP_DONE(m);
-                                 } else {
-                                   /*
-                                    * page is prepped but not pinned; 
-                                   * send it into limbo.  Note that
-                                    * vm_page_free (which will be 
-                                   * called after releasing the pin 
-                                   * lock) knows how to handle a page 
-                                   * with limbo set.
-                                    */
-                                    m->limbo = TRUE;
-                                    CLUSTER_STAT(
-                                                vm_pageout_target_page_limbo++;)
-                                 }
-                                 vm_page_pin_unlock();
-                                 if (m->limbo)
-                                    vm_page_free(m);
-                                } else {
-                                    CLUSTER_STAT(
-                                              vm_pageout_target_page_freed++;)
-                                    vm_page_free(m);/* clears busy, etc. */
-                                }
-                                vm_page_unlock_queues();
-                                target_offset += PAGE_SIZE_64;
-                                xfer_size -= PAGE_SIZE;
-                                entry++;
-                                 continue;
-                             }
-                             if (flags & UPL_COMMIT_INACTIVATE) {
-                                      vm_page_deactivate(m);
-                                     m->reference = FALSE;
-                                     pmap_clear_reference(m->phys_addr);
-                             } else if (!m->active && !m->inactive) {
-                                   if (m->reference || m->prep_pin_count != 0)
-                                      vm_page_activate(m);
-                                   else
-                                      vm_page_deactivate(m);
-                              }
-#if MACH_CLUSTER_STATS
-                              m->dirty = pmap_is_modified(m->phys_addr);
+       /*
+        * We've just modified the page's contents via the data cache and part
+        * of the new contents might still be in the cache and not yet in RAM.
+        * Since the page is now available and might get gathered in a UPL to
+        * be part of a DMA transfer from a driver that expects the memory to
+        * be coherent at this point, we have to flush the data cache.
+        */
+       pmap_sync_page_attributes_phys(page->phys_page);
+       /*
+        * Since the page is not mapped yet, some code might assume that it
+        * doesn't need to invalidate the instruction cache when writing to
+        * that page.  That code relies on "pmapped" being FALSE, so that the
+        * caches get synchronized when the page is first mapped.
+        */
+       assert(pmap_verify_free(page->phys_page));
+       page->pmapped = FALSE;
+       page->wpmapped = FALSE;
 
-                              if (m->dirty)   vm_pageout_cluster_dirtied++;
-                              else            vm_pageout_cluster_cleaned++;
-                              if (m->wanted)  vm_pageout_cluster_collisions++;
-#else
-                              m->dirty = 0;
-#endif
+       vm_object_paging_end(page->object);
+}
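+
+/*
+ * Hedged sketch (editorial assumption, not from this change): a page-in
+ * path that finds "page->encrypted" set would be expected to decrypt in
+ * place before the page becomes visible; passing 0 asks the routine to
+ * create its own temporary kernel mapping:
+ *
+ *     if (page->encrypted) {
+ *             vm_page_decrypt(page, 0);
+ *     }
+ */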
 
-                              if((m->busy) && (m->cleaning)) {
-                                 /* the request_page_list case */
-                               if(m->absent) {
-                                  m->absent = FALSE;
-                                  if(shadow_object->absent_count == 1)
-                                     vm_object_absent_release(shadow_object);
-                                  else
-                                     shadow_object->absent_count--;
-                               }
-                                m->overwriting = FALSE;
-                                 m->busy = FALSE;
-                                 m->dirty = FALSE;
-                              }
-                             else if (m->overwriting) {
-                                /* alternate request page list, write to 
-                                /* page_list case.  Occurs when the original
-                                /* page was wired at the time of the list
-                                /* request */
-                                assert(m->wire_count != 0);
-                                vm_page_unwire(m);/* reactivates */
-                                m->overwriting = FALSE;
-                             }
-                              m->cleaning = FALSE;
-                             /* It is a part of the semantic of COPYOUT_FROM */
-                             /* UPLs that a commit implies cache sync        */
-                             /* between the vm page and the backing store    */
-                             /* this can be used to strip the precious bit   */
-                             /* as well as clean */
-                             if (upl->flags & UPL_PAGE_SYNC_DONE)
-                                m->precious = FALSE;
-
-                             if (flags & UPL_COMMIT_SET_DIRTY) {
-                                m->dirty = TRUE;
-                             }
-                              /*
-                               * Wakeup any thread waiting for the page to be un-cleaning.
-                               */
-                              PAGE_WAKEUP(m);
-                              vm_page_unlock_queues();
+#if DEVELOPMENT || DEBUG
+unsigned long upl_encrypt_upls = 0;
+unsigned long upl_encrypt_pages = 0;
+#endif
 
-                       }
-               }
-               target_offset += PAGE_SIZE_64;
-               xfer_size -= PAGE_SIZE;
-               entry++;
+/*
+ * ENCRYPTED SWAP:
+ *
+ * upl_encrypt:
+ *     Encrypts all the pages in the UPL, within the specified range.
+ *
+ */
+void
+upl_encrypt(
+       upl_t                   upl,
+       upl_offset_t            crypt_offset,
+       upl_size_t              crypt_size)
+{
+       upl_size_t              upl_size, subupl_size=crypt_size;
+       upl_offset_t            offset_in_upl, subupl_offset=crypt_offset;
+       vm_object_t             upl_object;
+       vm_object_offset_t      upl_offset;
+       vm_page_t               page;
+       vm_object_t             shadow_object;
+       vm_object_offset_t      shadow_offset;
+       vm_object_offset_t      paging_offset;
+       vm_object_offset_t      base_offset;
+       int                     isVectorUPL = 0;
+       upl_t                   vector_upl = NULL;
+
+       if((isVectorUPL = vector_upl_is_valid(upl)))
+               vector_upl = upl;
+
+process_upl_to_encrypt:
+       if(isVectorUPL) {
+               crypt_size = subupl_size;
+               crypt_offset = subupl_offset;
+               upl =  vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
+               if(upl == NULL)
+                       panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
+               subupl_size -= crypt_size;
+               subupl_offset += crypt_size;
        }
 
-       vm_object_unlock(shadow_object);
-       if(flags & UPL_COMMIT_FREE_ON_EMPTY) {
-               if((upl->flags & UPL_DEVICE_MEMORY) 
-                               || (queue_empty(&upl->map_object->memq))) {
-                       upl_dealloc(upl);
-               }
-       }
-       return KERN_SUCCESS;
-}
+#if DEVELOPMENT || DEBUG
+       upl_encrypt_upls++;
+       upl_encrypt_pages += crypt_size / PAGE_SIZE;
+#endif
+       upl_object = upl->map_object;
+       upl_offset = upl->offset;
+       upl_size = upl->size;
 
-uc_upl_abort_range(
-       upl_t                   upl, 
-       vm_offset_t             offset, 
-       vm_size_t               size,
-       int                     error) 
-{
-       vm_size_t               xfer_size = size;
-       vm_object_t             shadow_object = upl->map_object->shadow;
-       vm_object_t             object = upl->map_object;
-       vm_object_offset_t      target_offset;
-       vm_object_offset_t      page_offset;
-       int                     entry;
+       vm_object_lock(upl_object);
 
-       if(upl->flags & UPL_DEVICE_MEMORY) {
-               xfer_size = 0;
-       } else if ((offset + size) > upl->size) {
-               return KERN_FAILURE;
+       /*
+        * Find the VM object that contains the actual pages.
+        */
+       if (upl_object->pageout) {
+               shadow_object = upl_object->shadow;
+               /*
+                * The offset in the shadow object is actually also
+                * accounted for in upl->offset.  It possibly shouldn't be
+                * this way, but for now don't account for it twice.
+                */
+               shadow_offset = 0;
+               assert(upl_object->paging_offset == 0); /* XXX ? */
+               vm_object_lock(shadow_object);
+       } else {
+               shadow_object = upl_object;
+               shadow_offset = 0;
        }
 
+       paging_offset = shadow_object->paging_offset;
+       vm_object_paging_begin(shadow_object);
 
-       vm_object_lock(shadow_object);
+       if (shadow_object != upl_object)
+               vm_object_unlock(upl_object);
 
-       entry = offset/PAGE_SIZE;
-       target_offset = (vm_object_offset_t)offset;
-       while(xfer_size) {
-               vm_page_t       t,m;
-               upl_page_info_t *p;
-
-               if((t = vm_page_lookup(object, target_offset)) != NULL) {
-
-                       t->pageout = FALSE;
-                       page_offset = t->offset;
-                       VM_PAGE_FREE(t);
-                       t = VM_PAGE_NULL;
-                       m = vm_page_lookup(shadow_object, 
-                                       page_offset + object->shadow_offset);
-                  if(m != VM_PAGE_NULL) {
-                       vm_object_paging_end(m->object);
-                       vm_page_lock_queues();
-                       if(m->absent) {
-                               /* COPYOUT = FALSE case */
-                               /* check for error conditions which must */
-                               /* be passed back to the pages customer  */
-                               if(error & UPL_ABORT_RESTART) {
-                                       m->restart = TRUE;
-                                       m->absent = FALSE;
-                                       vm_object_absent_release(m->object);
-                                       m->page_error = KERN_MEMORY_ERROR;
-                                       m->error = TRUE;
-                               } else if(error & UPL_ABORT_UNAVAILABLE) {
-                                       m->restart = FALSE;
-                                       m->unusual = TRUE;
-                                       m->clustered = FALSE;
-                               } else if(error & UPL_ABORT_ERROR) {
-                                       m->restart = FALSE;
-                                       m->absent = FALSE;
-                                       vm_object_absent_release(m->object);
-                                       m->page_error = KERN_MEMORY_ERROR;
-                                       m->error = TRUE;
-                               } else if(error & UPL_ABORT_DUMP_PAGES) {
-                                       m->clustered = TRUE;    
-                               } else {
-                                       m->clustered = TRUE;
-                               }
-                               
 
-                               m->cleaning = FALSE;
-                               m->overwriting = FALSE;
-                               PAGE_WAKEUP_DONE(m);
-                               if(m->clustered) {
-                                       vm_page_free(m);
-                               } else {
-                                       vm_page_activate(m);
-                               }
+       base_offset = shadow_offset;
+       base_offset += upl_offset;
+       base_offset += crypt_offset;
+       base_offset -= paging_offset;
 
-                               vm_page_unlock_queues();
-                               target_offset += PAGE_SIZE_64;
-                               xfer_size -= PAGE_SIZE;
-                               entry++;
-                               continue;
-                       }
-                       /*                          
-                        * Handle the trusted pager throttle.
-                        */                     
-                       if (m->laundry) { 
-                               vm_page_laundry_count--;
-                               m->laundry = FALSE;  
-                               if (vm_page_laundry_count 
-                                               < vm_page_laundry_min) {
-                                       vm_page_laundry_min = 0;
-                                       thread_wakeup((event_t) 
-                                               &vm_page_laundry_count); 
-                               }                    
-                       }         
-                       if(m->pageout) {
-                               assert(m->busy);
-                               assert(m->wire_count == 1);
-                               m->pageout = FALSE;
-                               vm_page_unwire(m);
-                       }
-                       m->cleaning = FALSE;
-                       m->busy = FALSE;
-                       m->overwriting = FALSE;
-#if    MACH_PAGEMAP
-                       vm_external_state_clr(
-                               m->object->existence_map, m->offset);
-#endif /* MACH_PAGEMAP */
-                       if(error & UPL_ABORT_DUMP_PAGES) {
-                               vm_page_free(m);
-                               pmap_page_protect(m->phys_addr, VM_PROT_NONE);
-                       } else {
-                               PAGE_WAKEUP(m);
-                       }
-                       vm_page_unlock_queues();
+       assert(crypt_offset + crypt_size <= upl_size);
+
+       for (offset_in_upl = 0;
+            offset_in_upl < crypt_size;
+            offset_in_upl += PAGE_SIZE) {
+               page = vm_page_lookup(shadow_object,
+                                     base_offset + offset_in_upl);
+               if (page == VM_PAGE_NULL) {
+                       panic("upl_encrypt: "
+                             "no page for (obj=%p,off=%lld+%d)!\n",
+                             shadow_object,
+                             base_offset,
+                             offset_in_upl);
+               }
+               /*
+                * Disconnect the page from all pmaps, so that nobody can
+                * access it while it's encrypted.  After that point, all
+                * accesses to this page will cause a page fault and block
+                * while the page is busy being encrypted.  After the
+                * encryption completes, any access will cause a
+                * page fault and the page gets decrypted at that time.
+                */
+               pmap_disconnect(page->phys_page);
+               vm_page_encrypt(page, 0);
+
+               if (vm_object_lock_avoid(shadow_object)) {
+                       /*
+                        * Give vm_pageout_scan() a chance to convert more
+                        * pages from "clean-in-place" to "clean-and-free",
+                        * if it's interested in the same pages we selected
+                        * in this cluster.
+                        */
+                       vm_object_unlock(shadow_object);
+                       mutex_pause(2);
+                       vm_object_lock(shadow_object);
                }
-          }
-          target_offset += PAGE_SIZE_64;
-          xfer_size -= PAGE_SIZE;
-          entry++;
        }
+
+       vm_object_paging_end(shadow_object);
        vm_object_unlock(shadow_object);
-       if(error & UPL_ABORT_FREE_ON_EMPTY) {
-               if((upl->flags & UPL_DEVICE_MEMORY) 
-                               || (queue_empty(&upl->map_object->memq))) {
-                       upl_dealloc(upl);
-               }
+       
+       if(isVectorUPL && subupl_size)
+               goto process_upl_to_encrypt;
+}
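+
+/*
+ * Hedged sketch (editorial, assumed caller): encrypting an entire UPL
+ * before handing it to the pager would look like:
+ *
+ *     upl_encrypt(upl, 0, upl->size);
+ */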
+
+#else /* CRYPTO */
+void
+upl_encrypt(
+       __unused upl_t                  upl,
+       __unused upl_offset_t   crypt_offset,
+       __unused upl_size_t     crypt_size)
+{
+}
+
+void
+vm_page_encrypt(
+       __unused vm_page_t              page,
+       __unused vm_map_offset_t        kernel_mapping_offset)
+{
+} 
+
+void
+vm_page_decrypt(
+       __unused vm_page_t              page,
+       __unused vm_map_offset_t        kernel_mapping_offset)
+{
+}
+
+#endif /* CRYPTO */
+
+void
+vm_pageout_queue_steal(vm_page_t page, boolean_t queues_locked)
+{
+       boolean_t       pageout;
+
+       pageout = page->pageout;
+
+       page->list_req_pending = FALSE;
+       page->cleaning = FALSE;
+       page->pageout = FALSE;
+
+       if (!queues_locked) {
+               vm_page_lockspin_queues();
+       }
+
+       /*
+        * need to drop the laundry count...
+        * we may also need to remove it
+        * from the I/O paging queue...
+        * vm_pageout_throttle_up handles both cases
+        *
+        * the laundry and pageout_queue flags are cleared...
+        */
+       vm_pageout_throttle_up(page);
+
+       if (pageout == TRUE) {
+               /*
+                * toss the wire count we picked up
+                * when we initially set this page up
+                * to be cleaned...
+                */
+               vm_page_unwire(page, TRUE);
+       }
+       vm_page_steal_pageout_page++;
+
+       if (!queues_locked) {
+               vm_page_unlock_queues();
        }
-       return KERN_SUCCESS;
 }
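+
+/*
+ * Hedged sketch (editorial): a caller that already holds the page queues
+ * lock passes TRUE for "queues_locked"; otherwise the routine takes and
+ * drops the lock itself:
+ *
+ *     vm_page_lockspin_queues();
+ *     vm_pageout_queue_steal(page, TRUE);
+ *     vm_page_unlock_queues();
+ */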
 
-kern_return_t
-uc_upl_abort(
-       upl_t   upl,
-       int     error)
+upl_t
+vector_upl_create(vm_offset_t upl_offset)
 {
-       vm_object_t             object = NULL;
-       vm_object_t             shadow_object = NULL;
-       vm_object_offset_t      offset;
-       vm_object_offset_t      shadow_offset;
-       vm_object_offset_t      target_offset;
-       int                     i;
-       vm_page_t               t,m;
+       int     vector_upl_size  = sizeof(struct _vector_upl);
+       int i=0;
+       upl_t   upl;
+       vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
+
+       upl = upl_create(0,UPL_VECTOR,0);
+       upl->vector_upl = vector_upl;
+       upl->offset = upl_offset;
+       vector_upl->size = 0;
+       vector_upl->offset = upl_offset;
+       vector_upl->invalid_upls=0;
+       vector_upl->num_upls=0;
+       vector_upl->pagelist = NULL;
+       
+       for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) {
+               vector_upl->upl_iostates[i].size = 0;
+               vector_upl->upl_iostates[i].offset = 0;
+               
+       }
+       return upl;
+}
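+
+/*
+ * Illustrative sketch (editorial): assembling a vectored UPL from sub-UPLs
+ * with the helpers defined below; "vupl" and "subupl" are illustrative
+ * names, not from this change:
+ *
+ *     upl_t vupl = vector_upl_create(upl_offset);
+ *     vector_upl_set_subupl(vupl, subupl, subupl->size);
+ *     vector_upl_set_iostate(vupl, subupl, 0, subupl->size);
+ *     vector_upl_set_pagelist(vupl);
+ */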
 
-       if(upl->flags & UPL_DEVICE_MEMORY) {
-               upl_dealloc(upl);
-               return KERN_SUCCESS;
+void
+vector_upl_deallocate(upl_t upl)
+{
+       if(upl) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if(vector_upl) {
+                       if(vector_upl->invalid_upls != vector_upl->num_upls)
+                               panic("Deallocating non-empty Vectored UPL\n");
+                       kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
+                       vector_upl->invalid_upls=0;
+                       vector_upl->num_upls = 0;
+                       vector_upl->pagelist = NULL;
+                       vector_upl->size = 0;
+                       vector_upl->offset = 0;
+                       kfree(vector_upl, sizeof(struct _vector_upl));
+                       vector_upl = (vector_upl_t)0xdeadbeef;
+               }
+               else
+                       panic("vector_upl_deallocate was passed a non-vectored upl\n");
        }
-       object = upl->map_object;
+       else
+               panic("vector_upl_deallocate was passed a NULL upl\n");
+}
 
-       if(object == NULL) {
-               panic("upl_abort: upl object is not backed by an object");
-               return KERN_INVALID_ARGUMENT;
+boolean_t
+vector_upl_is_valid(upl_t upl)
+{
+       if(upl &&  ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
+               vector_upl_t vector_upl = upl->vector_upl;
+               if(vector_upl == NULL || vector_upl == (vector_upl_t)0xdeadbeef || vector_upl == (vector_upl_t)0xfeedbeef)
+                       return FALSE;
+               else
+                       return TRUE;
        }
+       return FALSE;
+}
 
-       shadow_object = upl->map_object->shadow;
-       shadow_offset = upl->map_object->shadow_offset;
-       offset = 0;
-       vm_object_lock(shadow_object);
-       for(i = 0; i<(upl->size); i+=PAGE_SIZE, offset += PAGE_SIZE_64) {
-           if((t = vm_page_lookup(object,offset)) != NULL) {
-               target_offset = t->offset + shadow_offset;
-               if((m = vm_page_lookup(shadow_object, target_offset)) != NULL) {
-                       vm_object_paging_end(m->object);
-                       vm_page_lock_queues();
-                       if(m->absent) {
-                               /* COPYOUT = FALSE case */
-                               /* check for error conditions which must */
-                               /* be passed back to the pages customer  */
-                               if(error & UPL_ABORT_RESTART) {
-                                       m->restart = TRUE;
-                                       m->absent = FALSE;
-                                       vm_object_absent_release(m->object);
-                                       m->page_error = KERN_MEMORY_ERROR;
-                                       m->error = TRUE;
-                               } else if(error & UPL_ABORT_UNAVAILABLE) {
-                                       m->restart = FALSE;
-                                       m->unusual = TRUE;
-                                       m->clustered = FALSE;
-                               } else if(error & UPL_ABORT_ERROR) {
-                                       m->restart = FALSE;
-                                       m->absent = FALSE;
-                                       vm_object_absent_release(m->object);
-                                       m->page_error = KERN_MEMORY_ERROR;
-                                       m->error = TRUE;
-                               } else if(error & UPL_ABORT_DUMP_PAGES) {
-                                       m->clustered = TRUE;    
-                               } else {
-                                       m->clustered = TRUE;
+boolean_t
+vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size)
+{
+       if(vector_upl_is_valid(upl)) {          
+               vector_upl_t vector_upl = upl->vector_upl;
+               
+               if(vector_upl) {
+                       if(subupl) {
+                               if(io_size) {
+                                       if(io_size < PAGE_SIZE)
+                                               io_size = PAGE_SIZE;
+                                       subupl->vector_upl = (void*)vector_upl;
+                                       vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
+                                       vector_upl->size += io_size;
+                                       upl->size += io_size;
                                }
-                               
-                               m->cleaning = FALSE;
-                               m->overwriting = FALSE;
-                               PAGE_WAKEUP_DONE(m);
-                               if(m->clustered) {
-                                       vm_page_free(m);
-                               } else {
-                                       vm_page_activate(m);
+                               else {
+                                       uint32_t i=0,invalid_upls=0;
+                                       for(i = 0; i < vector_upl->num_upls; i++) {
+                                               if(vector_upl->upl_elems[i] == subupl)
+                                                       break;
+                                       }
+                                       if(i == vector_upl->num_upls)
+                                               panic("Trying to remove sub-upl when none exists");
+                                       
+                                       vector_upl->upl_elems[i] = NULL;
+                                       invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1); 
+                                       if(invalid_upls == vector_upl->num_upls)
+                                               return TRUE;
+                                       else 
+                                               return FALSE;
                                }
-                               vm_page_unlock_queues();
-                               continue;
-                       }
-                       /*                          
-                        * Handle the trusted pager throttle.
-                        */                     
-                       if (m->laundry) { 
-                               vm_page_laundry_count--;
-                               m->laundry = FALSE;  
-                               if (vm_page_laundry_count 
-                                               < vm_page_laundry_min) {
-                                       vm_page_laundry_min = 0;
-                                       thread_wakeup((event_t) 
-                                               &vm_page_laundry_count); 
-                               }                    
-                       }         
-                       if(m->pageout) {
-                               assert(m->busy);
-                               assert(m->wire_count == 1);
-                               m->pageout = FALSE;
-                               vm_page_unwire(m);
-                       }
-                       m->cleaning = FALSE;
-                       m->busy = FALSE;
-                       m->overwriting = FALSE;
-#if    MACH_PAGEMAP
-                       vm_external_state_clr(
-                               m->object->existence_map, m->offset);
-#endif /* MACH_PAGEMAP */
-                       if(error & UPL_ABORT_DUMP_PAGES) {
-                               vm_page_free(m);
-                               pmap_page_protect(m->phys_addr, VM_PROT_NONE);
-                       } else {
-                               PAGE_WAKEUP(m);
                        }
-                       vm_page_unlock_queues();
+                       else
+                               panic("vector_upl_set_subupl was passed a NULL upl element\n");
                }
-          }
+               else
+                       panic("vector_upl_set_subupl was passed a non-vectored upl\n");
        }
-       vm_object_unlock(shadow_object);
-       /* Remove all the pages from the map object so */
-       /* vm_pageout_object_terminate will work properly. */
-       while (!queue_empty(&upl->map_object->memq)) {
-               vm_page_t p;
+       else
+               panic("vector_upl_set_subupl was passed a NULL upl\n");
 
-               p = (vm_page_t) queue_first(&upl->map_object->memq);
+       return FALSE;
+}      
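+
+/*
+ * Illustrative sketch (editorial): passing io_size == 0 removes a sub-UPL;
+ * a TRUE return means every sub-UPL has now been invalidated, so the
+ * vectored UPL itself can be torn down:
+ *
+ *     if (vector_upl_set_subupl(vupl, subupl, 0) == TRUE) {
+ *             vector_upl_deallocate(vupl);
+ *     }
+ */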
 
-               assert(p->private);
-               assert(p->pageout);
-               p->pageout = FALSE;
-               assert(!p->cleaning);
+void
+vector_upl_set_pagelist(upl_t upl)
+{
+       if(vector_upl_is_valid(upl)) {          
+               uint32_t i=0;
+               vector_upl_t vector_upl = upl->vector_upl;
 
-               VM_PAGE_FREE(p);
+               if(vector_upl) {
+                       vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0;
+
+                       vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));
+                       
+                       for(i=0; i < vector_upl->num_upls; i++) {
+                               cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
+                               bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
+                               pagelist_size += cur_upl_pagelist_size;
+                               if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
+                                       upl->highest_page = vector_upl->upl_elems[i]->highest_page;
+                       }
+                       assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
+               }
+               else
+                       panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
        }
-       upl_dealloc(upl);
-       return KERN_SUCCESS;
+       else
+               panic("vector_upl_set_pagelist was passed a NULL upl\n");
+
 }
 
-/* an option on commit should be wire */
-kern_return_t
-uc_upl_commit(
-       upl_t           upl,
-       upl_page_info_t *page_list)
+upl_t
+vector_upl_subupl_byindex(upl_t upl, uint32_t index)
 {
-       if (upl->flags & UPL_DEVICE_MEMORY)
-               page_list = NULL;
-       if ((upl->flags & UPL_CLEAR_DIRTY) ||
-               (upl->flags & UPL_PAGE_SYNC_DONE)) {
-               vm_object_t     shadow_object = upl->map_object->shadow;
-               vm_object_t     object = upl->map_object;
-               vm_object_offset_t target_offset;
-               vm_size_t       xfer_end;
-
-               vm_page_t       t,m;
-
-               vm_object_lock(shadow_object);
+       if(vector_upl_is_valid(upl)) {          
+               vector_upl_t vector_upl = upl->vector_upl;
+               if(vector_upl) {
+                       if(index < vector_upl->num_upls)
+                               return vector_upl->upl_elems[index];
+               }
+               else
+                       panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
+       }
+       return NULL;
+}
 
-               target_offset = object->shadow_offset;
-               xfer_end = upl->size + object->shadow_offset;
-
-               while(target_offset < xfer_end) {
-                       if ((t = vm_page_lookup(object, 
-                               target_offset - object->shadow_offset))
-                               != NULL) {
-                               m = vm_page_lookup(
-                                       shadow_object, target_offset);
-                               if(m != VM_PAGE_NULL) {
-                                       if (upl->flags & UPL_CLEAR_DIRTY) {
-                                               pmap_clear_modify(m->phys_addr);
-                                               m->dirty = FALSE;
+upl_t
+vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
+{
+       if(vector_upl_is_valid(upl)) {          
+               uint32_t i=0;
+               vector_upl_t vector_upl = upl->vector_upl;
+
+               if(vector_upl) {
+                       upl_t subupl = NULL;
+                       vector_upl_iostates_t subupl_state;
+
+                       for(i=0; i < vector_upl->num_upls; i++) {
+                               subupl = vector_upl->upl_elems[i];
+                               subupl_state = vector_upl->upl_iostates[i];
+                               if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
+                                       /* We could have been passed an offset/size pair that belongs
+                                        * to a UPL element that has already been committed/aborted.
+                                        * If so, return NULL.
+                                        */
+                                       if(subupl == NULL)
+                                               return NULL;
+                                       if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
+                                               *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
+                                               if(*upl_size > subupl_state.size)
+                                                       *upl_size = subupl_state.size;
                                        }
-                                       /* It is a part of the semantic of */
-                                       /* COPYOUT_FROM UPLs that a commit */
-                                       /* implies cache sync between the  */
-                                       /* vm page and the backing store   */
-                                       /* this can be used to strip the   */
-                                       /* precious bit as well as clean   */
-                                       if (upl->flags & UPL_PAGE_SYNC_DONE)
-                                               m->precious = FALSE;
-                               }
+                                       if(*upl_offset >= subupl_state.offset)
+                                               *upl_offset -= subupl_state.offset;
+                                       else if(i)
+                                               panic("Vector UPL offset miscalculation\n");
+                                       return subupl;
+                               }       
                        }
-                       target_offset += PAGE_SIZE_64;
                }
-               vm_object_unlock(shadow_object);
+               else
+                       panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
        }
-       if (page_list) {
-               vm_object_t     shadow_object = upl->map_object->shadow;
-               vm_object_t     object = upl->map_object;
-               vm_object_offset_t target_offset;
-               vm_size_t       xfer_end;
-               int             entry;
-
-               vm_page_t       t, m;
-               upl_page_info_t *p;
+       return NULL;
+}
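+
+/*
+ * Illustrative example (editor's note; the values are hypothetical):
+ * with two 64KB sub-UPLs whose iostates cover [0, 64KB) and
+ * [64KB, 128KB), a query with *upl_offset = 68KB and *upl_size = 8KB
+ * returns the second sub-UPL with *upl_offset rebased to 4KB; had the
+ * request run past 128KB, *upl_size would have been clipped so the I/O
+ * never extends beyond that sub-UPL's iostate window.
+ */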
 
-               vm_object_lock(shadow_object);
+void
+vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
+{
+       *v_upl_submap = NULL;
 
-               entry = 0;
-               target_offset = object->shadow_offset;
-               xfer_end = upl->size + object->shadow_offset;
+       if(vector_upl_is_valid(upl)) {          
+               vector_upl_t vector_upl = upl->vector_upl;
+               if(vector_upl) {
+                       *v_upl_submap = vector_upl->submap;
+                       *submap_dst_addr = vector_upl->submap_dst_addr;
+               }
+               else
+                       panic("vector_upl_get_submap was passed a non-vectored UPL\n");
+       }
+       else
+               panic("vector_upl_get_submap was passed a NULL UPL\n");
+}
 
-               while(target_offset < xfer_end) {
+void
+vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
+{
+       if(vector_upl_is_valid(upl)) {          
+               vector_upl_t vector_upl = upl->vector_upl;
+               if(vector_upl) {
+                       vector_upl->submap = submap;
+                       vector_upl->submap_dst_addr = submap_dst_addr;
+               }
+               else
+                       panic("vector_upl_set_submap was passed a non-vectored UPL\n");
+       }
+       else
+               panic("vector_upl_set_submap was passed a NULL UPL\n");
+}
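+
+/*
+ * Editor's note: vector_upl_set_submap() only records the map/address
+ * pair that vector_upl_get_submap() later hands back; both routines
+ * panic rather than operate on a NULL or non-vectored UPL.
+ */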
 
-                       if ((t = vm_page_lookup(object, 
-                               target_offset - object->shadow_offset))
-                               == NULL) {
-                               target_offset += PAGE_SIZE_64;
-                               entry++;
-                               continue;
+void
+vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
+{
+       if(vector_upl_is_valid(upl)) {          
+               uint32_t i = 0;
+               vector_upl_t vector_upl = upl->vector_upl;
+
+               if(vector_upl) {
+                       for(i = 0; i < vector_upl->num_upls; i++) {
+                               if(vector_upl->upl_elems[i] == subupl)
+                                       break;
                        }
+                       
+                       if(i == vector_upl->num_upls)
+                               panic("setting sub-upl iostate when none exists");
 
-                       m = vm_page_lookup(shadow_object, target_offset);
-                       if(m != VM_PAGE_NULL) {
-                          p = &(page_list[entry]);
-                          if(page_list[entry].phys_addr &&
-                                               p->pageout && !m->pageout) {
-                                       vm_page_lock_queues();
-                                       m->busy = TRUE;
-                                       m->pageout = TRUE;
-                                       vm_page_wire(m);
-                                       vm_page_unlock_queues();
-                          } else if (page_list[entry].phys_addr &&
-                                               !p->pageout && m->pageout) {
-                                       vm_page_lock_queues();
-                                       m->pageout = FALSE;
-                                       m->absent = FALSE;
-                                       m->overwriting = FALSE;
-                                       vm_page_unwire(m);
-                                       PAGE_WAKEUP_DONE(m);
-                                       vm_page_unlock_queues();
-                          }
-                          page_list[entry].phys_addr = 0;
-                       }
-                       target_offset += PAGE_SIZE_64;
-                       entry++;
+                       vector_upl->upl_iostates[i].offset = offset;
+                       if(size < PAGE_SIZE)
+                               size = PAGE_SIZE;
+                       vector_upl->upl_iostates[i].size = size;
                }
-
-               vm_object_unlock(shadow_object);
+               else
+                       panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
        }
-       upl_dealloc(upl);
-       return KERN_SUCCESS;
+       else
+               panic("vector_upl_set_iostate was passed a NULL UPL\n");
 }
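+
+/*
+ * Editor's note: a recorded iostate is never smaller than one page;
+ * e.g. vector_upl_set_iostate(upl, sub, off, 512) stores a size of
+ * PAGE_SIZE, while the offset is stored exactly as passed.
+ */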
 
-upl_t
-upl_create(
-       boolean_t       internal)
+void
+vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
 {
-       upl_t   upl;
+       if(vector_upl_is_valid(upl)) {          
+               uint32_t i = 0;
+               vector_upl_t vector_upl = upl->vector_upl;
+
+               if(vector_upl) {
+                       for(i = 0; i < vector_upl->num_upls; i++) {
+                               if(vector_upl->upl_elems[i] == subupl)
+                                       break;
+                       }
+                       
+                       if(i == vector_upl->num_upls)
+                               panic("getting sub-upl iostate when none exists");
 
-       if(internal) {
-               upl = (upl_t)kalloc(sizeof(struct upl)
-                       + (sizeof(struct upl_page_info)*MAX_UPL_TRANSFER));
-       } else {
-               upl = (upl_t)kalloc(sizeof(struct upl));
+                       *offset = vector_upl->upl_iostates[i].offset;
+                       *size = vector_upl->upl_iostates[i].size;
+               }
+               else
+                       panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
        }
-       upl->flags = 0;
-       upl->src_object = NULL;
-       upl->kaddr = (vm_offset_t)0;
-       upl->size = 0;
-       upl->map_object = NULL;
-       upl->ref_count = 1;
-       upl_lock_init(upl);
-#ifdef UBC_DEBUG
-       upl->ubc_alias1 = 0;
-       upl->ubc_alias2 = 0;
-#endif /* UBC_DEBUG */
-       return(upl);
+       else
+               panic("vector_upl_get_iostate was passed a NULL UPL\n");
 }
 
 void
-upl_destroy(
-       upl_t   upl)
+vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
 {
-
-#ifdef UBC_DEBUG
-       {
-               upl_t   upl_ele;
-               vm_object_lock(upl->map_object->shadow);
-               queue_iterate(&upl->map_object->shadow->uplq, 
-                                               upl_ele, upl_t, uplq) {
-                       if(upl_ele == upl) {
-                               queue_remove(&upl->map_object->shadow->uplq, 
-                                       upl_ele, upl_t, uplq);
-                               break;
+       if(vector_upl_is_valid(upl)) {          
+               vector_upl_t vector_upl = upl->vector_upl;
+               if(vector_upl) {
+                       if(index < vector_upl->num_upls) {
+                               *offset = vector_upl->upl_iostates[index].offset;
+                               *size = vector_upl->upl_iostates[index].size;
                        }
+                       else
+                               *offset = *size = 0;
                }
-               vm_object_unlock(upl->map_object->shadow);
-       }
-#endif /* UBC_DEBUG */
-       if(!(upl->flags & UPL_DEVICE_MEMORY))
-               vm_object_deallocate(upl->map_object);
-       if(upl->flags & UPL_INTERNAL) {
-               kfree((vm_offset_t)upl,
-                       sizeof(struct upl) + 
-                       (sizeof(struct upl_page_info) * MAX_UPL_TRANSFER));
-       } else {
-               kfree((vm_offset_t)upl, sizeof(struct upl));
+               else
+                       panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
        }
+       else
+               panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
 }
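+
+/*
+ * Editor's note: unlike vector_upl_get_iostate(), which panics on an
+ * unknown sub-UPL, the by-index variant treats an out-of-range index as
+ * a soft failure and reports *offset = *size = 0.
+ */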
 
-vm_size_t
-upl_get_internal_pagelist_offset()
+upl_page_info_t *
+upl_get_internal_vectorupl_pagelist(upl_t upl)
 {
-       return sizeof(struct upl);
+       return ((vector_upl_t)(upl->vector_upl))->pagelist;
 }
 
-void
-upl_set_dirty(
-       upl_t   upl)
+void *
+upl_get_internal_vectorupl(upl_t upl)
+{
+       return upl->vector_upl;
+}
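+
+/*
+ * Editor's note: upl_get_internal_vectorupl_pagelist() dereferences
+ * upl->vector_upl unconditionally, so it must only be called on a UPL
+ * known to be vectored; upl_get_internal_vectorupl() returns the raw
+ * pointer, letting callers test for NULL first.
+ */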
+
+vm_size_t
+upl_get_internal_pagelist_offset(void)
 {
-       upl->flags |= UPL_CLEAR_DIRTY;
+       return sizeof(struct upl);
 }
 
 void
 upl_clear_dirty(
-       upl_t   upl)
+       upl_t           upl,
+       boolean_t       value)
 {
-       upl->flags &= ~UPL_CLEAR_DIRTY;
+       if (value) {
+               upl->flags |= UPL_CLEAR_DIRTY;
+       } else {
+               upl->flags &= ~UPL_CLEAR_DIRTY;
+       }
 }
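+
+/*
+ * Editor's note: upl_set_dirty() is gone; upl_clear_dirty() now takes
+ * the desired state directly.  upl_clear_dirty(upl, TRUE) arms
+ * UPL_CLEAR_DIRTY (telling the commit path to clear each page's dirty
+ * state), and upl_clear_dirty(upl, FALSE) disarms it.
+ */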
 
 
 #ifdef MACH_BSD
-boolean_t  upl_page_present(upl_page_info_t *upl, int index);
-boolean_t  upl_dirty_page(upl_page_info_t *upl, int index);
-boolean_t  upl_valid_page(upl_page_info_t *upl, int index);
-vm_offset_t  upl_phys_page(upl_page_info_t *upl, int index);
 
+boolean_t  upl_device_page(upl_page_info_t *upl)
+{
+       return(UPL_DEVICE_PAGE(upl));
+}
 boolean_t  upl_page_present(upl_page_info_t *upl, int index)
 {
        return(UPL_PAGE_PRESENT(upl, index));
 }
+boolean_t  upl_speculative_page(upl_page_info_t *upl, int index)
+{
+       return(UPL_SPECULATIVE_PAGE(upl, index));
+}
 boolean_t  upl_dirty_page(upl_page_info_t *upl, int index)
 {
        return(UPL_DIRTY_PAGE(upl, index));
@@ -3338,12 +7396,14 @@ boolean_t  upl_valid_page(upl_page_info_t *upl, int index)
 {
        return(UPL_VALID_PAGE(upl, index));
 }
-vm_offset_t  upl_phys_page(upl_page_info_t *upl, int index)
+ppnum_t  upl_phys_page(upl_page_info_t *upl, int index)
 {
-       return((vm_offset_t)UPL_PHYS_PAGE(upl, index));
+       return(UPL_PHYS_PAGE(upl, index));
 }
 
-void vm_countdirtypages(void)
+
+void
+vm_countdirtypages(void)
 {
        vm_page_t m;
        int dpages;
@@ -3364,12 +7424,44 @@ void vm_countdirtypages(void)
                if(m->pageout) pgopages++;
                if(m->precious) precpages++;
 
+               assert(m->object != kernel_object);
                m = (vm_page_t) queue_next(&m->pageq);
                if (m ==(vm_page_t )0) break;
 
        } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
        vm_page_unlock_queues();
 
+       vm_page_lock_queues();
+       m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+       do {
+               if (m ==(vm_page_t )0) break;
+
+               dpages++;
+               assert(m->dirty);
+               assert(!m->pageout);
+               assert(m->object != kernel_object);
+               m = (vm_page_t) queue_next(&m->pageq);
+               if (m ==(vm_page_t )0) break;
+
+       } while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
+       vm_page_unlock_queues();
+
+       vm_page_lock_queues();
+       m = (vm_page_t) queue_first(&vm_page_queue_zf);
+       do {
+               if (m ==(vm_page_t )0) break;
+
+               if(m->dirty) dpages++;
+               if(m->pageout) pgopages++;
+               if(m->precious) precpages++;
+
+               assert(m->object != kernel_object);
+               m = (vm_page_t) queue_next(&m->pageq);
+               if (m ==(vm_page_t )0) break;
+
+       } while (!queue_end(&vm_page_queue_zf,(queue_entry_t) m));
+       vm_page_unlock_queues();
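+       /*
+        * Editor's note: the throttled and zero-fill (zf) queues are now
+        * scanned in addition to the inactive queue.  Throttled pages are
+        * counted as dirty unconditionally (the asserts above document
+        * that they must be dirty and not yet in pageout), while zf pages
+        * are classified the same way as inactive ones.
+        */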
+
        printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
 
        dpages=0;
@@ -3385,6 +7477,7 @@ void vm_countdirtypages(void)
                if(m->pageout) pgopages++;
                if(m->precious) precpages++;
 
+               assert(m->object != kernel_object);
                m = (vm_page_t) queue_next(&m->pageq);
                if(m == (vm_page_t )0) break;
 
@@ -3396,14 +7489,26 @@ void vm_countdirtypages(void)
 }
 #endif /* MACH_BSD */
 
-#ifdef UBC_DEBUG
-kern_return_t  upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
+ppnum_t upl_get_highest_page(
+                            upl_t                      upl)
+{
+        return upl->highest_page;
+}
+
+upl_size_t upl_get_size(
+                            upl_t                      upl)
+{
+        return upl->size;
+}
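+
+/*
+ * Editor's note: these two accessors expose the UPL's highest physical
+ * page number and byte size, presumably so callers outside this file
+ * need not see the layout of struct upl.
+ */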
+
+#if UPL_DEBUG
+kern_return_t  upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
 {
        upl->ubc_alias1 = alias1;
        upl->ubc_alias2 = alias2;
        return KERN_SUCCESS;
 }
-int  upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
+int  upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
 {
        if(al)
                *al = upl->ubc_alias1;
@@ -3411,7 +7516,7 @@ int  upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
                *al2 = upl->ubc_alias2;
        return KERN_SUCCESS;
 }
-#endif /* UBC_DEBUG */
+#endif /* UPL_DEBUG */
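+
+/*
+ * Editor's note: the ubc alias accessors were widened from unsigned int
+ * to uintptr_t, which keeps pointer-sized alias values intact on 64-bit
+ * kernels.
+ */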
 
 
 
@@ -3421,15 +7526,11 @@ int  upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
 #include <vm/vm_print.h>
 
 #define        printf  kdbprintf
-extern int     db_indent;
 void           db_pageout(void);
 
 void
 db_vm(void)
 {
-       extern int vm_page_gobble_count;
-       extern int vm_page_limbo_count, vm_page_limbo_real_count;
-       extern int vm_page_pin_count;
 
        iprintf("VM Statistics:\n");
        db_indent += 2;
@@ -3440,9 +7541,6 @@ db_vm(void)
                vm_page_free_count);
        printf("   wire  %5d  gobbl %5d\n",
               vm_page_wire_count, vm_page_gobble_count);
-       iprintf("laund %5d  limbo %5d  lim_r %5d   pin   %5d\n",
-               vm_page_laundry_count, vm_page_limbo_count,
-               vm_page_limbo_real_count, vm_page_pin_count);
        db_indent -= 2;
        iprintf("target:\n");
        db_indent += 2;
@@ -3451,34 +7549,18 @@ db_vm(void)
                vm_page_free_target);
        printf("   resrv %5d\n", vm_page_free_reserved);
        db_indent -= 2;
-
-       iprintf("burst:\n");
-       db_indent += 2;
-       iprintf("max   %5d  min   %5d  wait  %5d   empty %5d\n",
-                 vm_pageout_burst_max, vm_pageout_burst_min,
-                 vm_pageout_burst_wait, vm_pageout_empty_wait);
-       db_indent -= 2;
        iprintf("pause:\n");
-       db_indent += 2;
-       iprintf("count %5d  max   %5d\n",
-               vm_pageout_pause_count, vm_pageout_pause_max);
-#if    MACH_COUNTERS
-       iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue);
-#endif /* MACH_COUNTERS */
-       db_indent -= 2;
        db_pageout();
        db_indent -= 2;
 }
 
-void
-db_pageout(void)
-{
-       extern int c_limbo_page_free;
-       extern int c_limbo_convert;
 #if    MACH_COUNTERS
-       extern int c_laundry_pages_freed;
+extern int c_laundry_pages_freed;
 #endif /* MACH_COUNTERS */
 
+void
+db_pageout(void)
+{
        iprintf("Pageout Statistics:\n");
        db_indent += 2;
        iprintf("active %5d  inactv %5d\n",
@@ -3489,11 +7571,6 @@ db_pageout(void)
        iprintf("used   %5d  clean  %5d  dirty  %5d\n",
                vm_pageout_inactive_used, vm_pageout_inactive_clean,
                vm_pageout_inactive_dirty);
-       iprintf("pinned %5d  limbo  %5d  setup_limbo %5d  setup_unprep %5d\n",
-               vm_pageout_inactive_pinned, vm_pageout_inactive_limbo,
-               vm_pageout_setup_limbo, vm_pageout_setup_unprepped);
-       iprintf("limbo_page_free  %5d   limbo_convert  %5d\n",
-               c_limbo_page_free, c_limbo_convert);
 #if    MACH_COUNTERS
        iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
 #endif /* MACH_COUNTERS */
@@ -3511,27 +7588,9 @@ db_pageout(void)
        iprintf("collisions   %5d   page_dirtied  %5d   page_freed  %5d\n",
                vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
                vm_pageout_target_page_freed);
-       iprintf("page_pinned  %5d   page_limbo    %5d\n",
-               vm_pageout_target_page_pinned, vm_pageout_target_page_limbo);
        db_indent -= 2;
 #endif /* MACH_CLUSTER_STATS */
        db_indent -= 2;
 }
 
-#if MACH_CLUSTER_STATS
-unsigned long vm_pageout_cluster_dirtied = 0;
-unsigned long vm_pageout_cluster_cleaned = 0;
-unsigned long vm_pageout_cluster_collisions = 0;
-unsigned long vm_pageout_cluster_clusters = 0;
-unsigned long vm_pageout_cluster_conversions = 0;
-unsigned long vm_pageout_target_collisions = 0;
-unsigned long vm_pageout_target_page_dirtied = 0;
-unsigned long vm_pageout_target_page_freed = 0;
-unsigned long vm_pageout_target_page_pinned = 0;
-unsigned long vm_pageout_target_page_limbo = 0;
-#define CLUSTER_STAT(clause)   clause
-#else  /* MACH_CLUSTER_STATS */
-#define CLUSTER_STAT(clause)
-#endif /* MACH_CLUSTER_STATS */
-
 #endif /* MACH_KDB */