diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c
index 0958f3cf09452218cca63a34b081252a536d9f07..234ec3e00bde52a18c68970d6b83d49c4c9eff38 100644
--- a/osfmk/vm/vm_fault.c
+++ b/osfmk/vm/vm_fault.c
@@ -1,26 +1,31 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
  * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
- * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
+ * This file contains Original Code and/or Modifications of Original Code 
+ * as defined in and that are subject to the Apple Public Source License 
+ * Version 2.0 (the 'License'). You may not use this file except in 
+ * compliance with the License.  The rights granted to you under the 
+ * License may not be used to create, or enable the creation or 
+ * redistribution of, unlawful or unlicensed copies of an Apple operating 
+ * system, or to circumvent, violate, or enable the circumvention or 
+ * violation of, any terms of an Apple operating system software license 
+ * agreement.
+ *
+ * Please obtain a copy of the License at 
+ * http://www.opensource.apple.com/apsl/ and read it before using this 
  * file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ *
+ * The Original Code and all software distributed under the License are 
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and 
  * limitations under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  *
  *     Page fault handling module.
  */
-#ifdef MACH_BSD
-/* remove after component interface available */
-extern int     vnode_pager_workaround;
-extern int     device_pager_workaround;
-#endif
 
 #include <mach_cluster_stats.h>
 #include <mach_pagemap.h>
 #include <mach_kdb.h>
 
-#include <vm/vm_fault.h>
+#include <mach/mach_types.h>
 #include <mach/kern_return.h>
 #include <mach/message.h>      /* for error codes */
+#include <mach/vm_param.h>
+#include <mach/vm_behavior.h>
+#include <mach/memory_object.h>
+                               /* For memory_object_data_{request,unlock} */
+
+#include <kern/kern_types.h>
 #include <kern/host_statistics.h>
 #include <kern/counters.h>
 #include <kern/task.h>
@@ -78,21 +84,22 @@ extern int  device_pager_workaround;
 #include <kern/sched_prim.h>
 #include <kern/host.h>
 #include <kern/xpr.h>
+#include <kern/mach_param.h>
+#include <kern/macro_help.h>
+#include <kern/zalloc.h>
+#include <kern/misc_protos.h>
+
 #include <ppc/proc_reg.h>
+
+#include <vm/vm_fault.h>
 #include <vm/task_working_set.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
+#include <vm/vm_kern.h>
 #include <vm/pmap.h>
 #include <vm/vm_pageout.h>
-#include <mach/vm_param.h>
-#include <mach/vm_behavior.h>
-#include <mach/memory_object.h>
-                               /* For memory_object_data_{request,unlock} */
-#include <kern/mach_param.h>
-#include <kern/macro_help.h>
-#include <kern/zalloc.h>
-#include <kern/misc_protos.h>
+#include <vm/vm_protos.h>
 
 #include <sys/kdebug.h>
 
@@ -101,11 +108,9 @@ extern int device_pager_workaround;
 
 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
 
-int            vm_object_absent_max = 50;
+unsigned int   vm_object_absent_max = 50;
 
 int            vm_fault_debug = 0;
-boolean_t      vm_page_deactivate_behind = TRUE;
-
 
 #if    !VM_FAULT_STATIC_CONFIG
 boolean_t      vm_fault_dirty_handling = FALSE;
@@ -117,13 +122,14 @@ boolean_t software_reference_bits = TRUE;
 extern struct db_watchpoint *db_watchpoint_list;
 #endif /* MACH_KDB */
 
+
 /* Forward declarations of internal routines. */
 extern kern_return_t vm_fault_wire_fast(
                                vm_map_t        map,
-                               vm_offset_t     va,
+                               vm_map_offset_t va,
                                vm_map_entry_t  entry,
                                pmap_t          pmap,
-                               vm_offset_t     pmap_addr);
+                               vm_map_offset_t pmap_addr);
 
 extern void vm_fault_continue(void);
 
@@ -205,13 +211,115 @@ struct {
 boolean_t vm_allow_clustered_pagein = FALSE;
 int vm_pagein_cluster_used = 0;
 
+#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
+
+
+boolean_t      vm_page_deactivate_behind = TRUE;
 /* 
  * Prepage default sizes given VM_BEHAVIOR_DEFAULT reference behavior 
  */
-int vm_default_ahead = 1;      /* Number of pages to prepage ahead */
-int vm_default_behind = 0;     /* Number of pages to prepage behind */
+int vm_default_ahead = 0;
+int vm_default_behind = MAX_UPL_TRANSFER;
+
+/*
+ *     vm_fault_deactivate_behind
+ *
+ *     Determine if sequential access is in progress
+ *     in accordance with the behavior specified.  If
+ *     so, compute a potential page to deactivate and
+ *     deactivate it.
+ *
+ *     The object must be locked.
+ */
+static
+boolean_t
+vm_fault_deactivate_behind(
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_behavior_t           behavior)
+{
+       vm_page_t m;
+
+#if TRACEFAULTPAGE
+       dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
+#endif
+
+       if (object == kernel_object) {
+               /*
+                * Do not deactivate pages from the kernel object: they
+                * are not intended to become pageable.
+                */
+               return FALSE;
+       }
+
+       switch (behavior) {
+       case VM_BEHAVIOR_RANDOM:
+               object->sequential = PAGE_SIZE_64;
+               m = VM_PAGE_NULL;
+               break;
+       case VM_BEHAVIOR_SEQUENTIAL:
+               if (offset &&
+                       object->last_alloc == offset - PAGE_SIZE_64) {
+                       object->sequential += PAGE_SIZE_64;
+                       m = vm_page_lookup(object, offset - PAGE_SIZE_64);
+               } else {
+                       object->sequential = PAGE_SIZE_64; /* reset */
+                       m = VM_PAGE_NULL;
+               }
+               break;
+       case VM_BEHAVIOR_RSEQNTL:
+               if (object->last_alloc &&
+                       object->last_alloc == offset + PAGE_SIZE_64) {
+                       object->sequential += PAGE_SIZE_64;
+                       m = vm_page_lookup(object, offset + PAGE_SIZE_64);
+               } else {
+                       object->sequential = PAGE_SIZE_64; /* reset */
+                       m = VM_PAGE_NULL;
+               }
+               break;
+       case VM_BEHAVIOR_DEFAULT:
+       default:
+               if (offset && 
+                       object->last_alloc == offset - PAGE_SIZE_64) {
+                       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
+
+                       object->sequential += PAGE_SIZE_64;
+                       m = (offset >= behind &&
+                               object->sequential >= behind) ?
+                               vm_page_lookup(object, offset - behind) :
+                               VM_PAGE_NULL;
+               } else if (object->last_alloc &&
+                       object->last_alloc == offset + PAGE_SIZE_64) {
+                       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
+
+                       object->sequential += PAGE_SIZE_64;
+                       m = (offset < -behind &&
+                               object->sequential >= behind) ?
+                               vm_page_lookup(object, offset + behind) :
+                               VM_PAGE_NULL;
+               } else {
+                       object->sequential = PAGE_SIZE_64;
+                       m = VM_PAGE_NULL;
+               }
+               break;
+       }
+
+       object->last_alloc = offset;
+
+       if (m) {
+               if (!m->busy) {
+                       vm_page_lock_queues();
+                       vm_page_deactivate(m);
+                       vm_page_unlock_queues();
+#if TRACEFAULTPAGE
+                       dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
+#endif
+               }
+               return TRUE;
+       }
+       return FALSE;
+}
 
-#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
 
 /*
  *     Routine:        vm_fault_page
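
The VM_BEHAVIOR_DEFAULT arm of the new vm_fault_deactivate_behind() above reduces to offset arithmetic: a fault exactly one page past the previous one extends the sequential run, and once the run spans at least vm_default_behind pages, the page that far behind the current offset becomes a deactivation candidate. The stand-alone, user-space sketch below models only that arithmetic for the forward direction; it is not from the xnu sources, the toy_* names are invented, and it assumes 4 KB pages and a 256-page window standing in for MAX_UPL_TRANSFER.

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE  ((uint64_t)4096)   /* assumed page size */
#define TOY_BEHIND     ((uint64_t)256)    /* stands in for MAX_UPL_TRANSFER */

struct toy_seq_state {
    uint64_t last_alloc;    /* offset of the previous fault */
    uint64_t sequential;    /* bytes faulted sequentially so far */
};

/*
 * Forward-direction model of the VM_BEHAVIOR_DEFAULT case: if this
 * fault lands exactly one page past the previous one, the run grows;
 * once the run spans at least TOY_BEHIND pages, the page that many
 * pages back is a deactivation candidate.  Returns the candidate
 * offset, or UINT64_MAX if there is nothing to deactivate.
 */
static uint64_t
toy_deactivate_candidate(struct toy_seq_state *s, uint64_t offset)
{
    uint64_t behind = TOY_BEHIND * TOY_PAGE_SIZE;
    uint64_t candidate = UINT64_MAX;

    if (offset != 0 && s->last_alloc == offset - TOY_PAGE_SIZE) {
        s->sequential += TOY_PAGE_SIZE;
        if (offset >= behind && s->sequential >= behind)
            candidate = offset - behind;
    } else {
        s->sequential = TOY_PAGE_SIZE;    /* run broken: reset */
    }
    s->last_alloc = offset;
    return candidate;
}

int
main(void)
{
    struct toy_seq_state s = { 0, 0 };
    uint64_t off;

    for (off = 0; off < 260 * TOY_PAGE_SIZE; off += TOY_PAGE_SIZE) {
        uint64_t c = toy_deactivate_candidate(&s, off);
        if (c != UINT64_MAX)
            printf("fault at 0x%llx -> deactivate page at 0x%llx\n",
                   (unsigned long long)off, (unsigned long long)c);
    }
    return 0;
}
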
@@ -266,8 +374,8 @@ vm_fault_page(
        vm_prot_t       fault_type,     /* What access is requested */
        boolean_t       must_be_resident,/* Must page be resident? */
        int             interruptible,  /* how may fault be interrupted? */
-       vm_object_offset_t lo_offset,   /* Map entry start */
-       vm_object_offset_t hi_offset,   /* Map entry end */
+       vm_map_offset_t lo_offset,      /* Map entry start */
+       vm_map_offset_t hi_offset,      /* Map entry end */
        vm_behavior_t   behavior,       /* Page reference behavior */
        /* Modifies in place: */
        vm_prot_t       *protection,    /* Protection for mapping */
@@ -284,7 +392,7 @@ vm_fault_page(
                                         * it is a write fault and a full
                                         * page is provided */
        vm_map_t        map,
-       vm_offset_t     vaddr)
+       __unused vm_map_offset_t        vaddr)
 {
        register
        vm_page_t               m;
@@ -298,10 +406,8 @@ vm_fault_page(
        boolean_t               look_for_page;
        vm_prot_t               access_required = fault_type;
        vm_prot_t               wants_copy_flag;
-       vm_size_t               cluster_size, length;
-       vm_object_offset_t      cluster_offset;
-       vm_object_offset_t      cluster_start, cluster_end, paging_offset;
-       vm_object_offset_t      align_offset;
+       vm_object_size_t        length;
+       vm_object_offset_t      cluster_start, cluster_end;
        CLUSTER_STAT(int pages_at_higher_offsets;)
        CLUSTER_STAT(int pages_at_lower_offsets;)
        kern_return_t   wait_result;
@@ -500,8 +606,14 @@ vm_fault_page(
                        /*
                         *      If the page was pre-paged as part of a
                         *      cluster, record the fact.
+                        *      If we were passed a valid pointer for
+                        *      "type_of_fault", then we came from
+                        *      vm_fault... we'll let it deal with
+                        *      this condition, since it
+                        *      needs to see m->clustered to correctly
+                        *      account for the pageins.
                         */
-                       if (m->clustered) {
+                       if (type_of_fault == NULL && m->clustered) {
                                vm_pagein_cluster_used++;
                                m->clustered = FALSE;
                        }
@@ -542,6 +654,30 @@ vm_fault_page(
                                continue;
                        }
 
+                       if (m->encrypted) {
+                               /*
+                                * ENCRYPTED SWAP:
+                                * the user needs access to a page that we
+                                * encrypted before paging it out.
+                                * Decrypt the page now.
+                                * Keep it busy to prevent anyone from
+                                * accessing it during the decryption.
+                                */
+                               m->busy = TRUE;
+                               vm_page_decrypt(m, 0);
+                               assert(object == m->object);
+                               assert(m->busy);
+                               PAGE_WAKEUP_DONE(m);
+
+                               /*
+                                * Retry from the top, in case
+                                * something changed while we were
+                                * decrypting.
+                                */
+                               continue;
+                       }
+                       ASSERT_PAGE_DECRYPTED(m);
+
                        /*
                         *      If the page is in error, give up now.
                         */
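
The m->encrypted handling added above follows a mark-busy / decrypt / wake-waiters / retry-the-lookup pattern: the page stays busy so nothing else can touch it while vm_page_decrypt() runs, and the lookup is restarted afterwards in case the object changed in the meantime. The self-contained sketch below models only that control flow; it is not kernel code, and toy_page / toy_decrypt() are invented stand-ins for vm_page_t and vm_page_decrypt().

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
    bool busy;        /* someone is operating on the page; others wait */
    bool encrypted;   /* contents were encrypted when paged out */
};

/* Stand-in for vm_page_decrypt(): decrypt the contents in place. */
static void
toy_decrypt(struct toy_page *p)
{
    p->encrypted = false;
}

/*
 * Model of the fault path: if the page we looked up is encrypted,
 * keep it busy while decrypting, wake any waiters, then retry the
 * whole lookup in case the object changed underneath us.
 */
static void
toy_handle_fault(struct toy_page *p)
{
    for (;;) {                    /* the "continue" retry loop */
        if (p->encrypted) {
            p->busy = true;       /* keep others out during decryption */
            toy_decrypt(p);
            p->busy = false;      /* PAGE_WAKEUP_DONE() equivalent */
            continue;             /* retry from the top */
        }
        printf("page usable: busy=%d encrypted=%d\n", p->busy, p->encrypted);
        return;
    }
}

int
main(void)
{
    struct toy_page p = { false, true };
    toy_handle_fault(&p);
    return 0;
}
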
@@ -611,10 +747,35 @@ vm_fault_page(
                                         * need to allocate a real page.
                                         */
                                        if (VM_PAGE_THROTTLED() ||
-                                           (real_m = vm_page_grab()) == VM_PAGE_NULL) {
-                                               vm_fault_cleanup(object, first_m);
-                                               thread_interrupt_level(interruptible_state);
-                                               return(VM_FAULT_MEMORY_SHORTAGE);
+                                           (real_m = vm_page_grab()) 
+                                                       == VM_PAGE_NULL) {
+                                               vm_fault_cleanup(
+                                                       object, first_m);
+                                               thread_interrupt_level(
+                                                       interruptible_state);
+                                               return(
+                                                  VM_FAULT_MEMORY_SHORTAGE);
+                                       }
+
+                                       /*
+                                        * Are we protecting the system from
+                                        * backing store exhaustion?  If so,
+                                        * sleep unless we are privileged.
+                                        */
+
+                                       if(vm_backing_store_low) {
+                                          if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                               assert_wait((event_t) 
+                                                       &vm_backing_store_low, 
+                                                       THREAD_UNINT);
+                                               vm_fault_cleanup(object, 
+                                                                   first_m);
+                                               thread_block(THREAD_CONTINUE_NULL);
+                                               thread_interrupt_level(
+                                                       interruptible_state);
+                                               return(VM_FAULT_RETRY);
+                                          }
                                        }
 
 
                                        }
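
The vm_backing_store_low blocks introduced throughout this change apply a single policy: while backing store is nearly exhausted, only tasks holding VM_BACKING_STORE_PRIV may take a fresh page; every other task blocks on the vm_backing_store_low event and retries the fault (VM_FAULT_RETRY). A minimal, stand-alone model of that policy decision follows; the toy_* names are invented and the sketch is not part of the diff.

#include <stdbool.h>
#include <stdio.h>

enum toy_disposition {
    TOY_PROCEED,    /* go ahead and grab a fresh page */
    TOY_RETRY       /* sleep until backing store recovers, then retry */
};

/*
 * Model of the policy: unprivileged tasks must wait out a backing
 * store shortage; privileged ones may keep allocating.
 */
static enum toy_disposition
toy_grab_page_policy(bool backing_store_low, bool task_privileged)
{
    if (backing_store_low && !task_privileged)
        return TOY_RETRY;
    return TOY_PROCEED;
}

int
main(void)
{
    printf("low, unprivileged -> %s\n",
           toy_grab_page_policy(true, false) == TOY_RETRY ? "retry" : "proceed");
    printf("low, privileged   -> %s\n",
           toy_grab_page_policy(true, true) == TOY_RETRY ? "retry" : "proceed");
    printf("ok,  unprivileged -> %s\n",
           toy_grab_page_policy(false, false) == TOY_RETRY ? "retry" : "proceed");
    return 0;
}
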
 
 
@@ -653,21 +814,24 @@ vm_fault_page(
                                        if (!no_zero_fill) {
                                                vm_object_unlock(object);
                                                vm_page_zero_fill(m);
                                        if (!no_zero_fill) {
                                                vm_object_unlock(object);
                                                vm_page_zero_fill(m);
+                                               vm_object_lock(object);
+
                                                if (type_of_fault)
                                                        *type_of_fault = DBG_ZERO_FILL_FAULT;
                                                VM_STAT(zero_fill_count++);
                                                if (type_of_fault)
                                                        *type_of_fault = DBG_ZERO_FILL_FAULT;
                                                VM_STAT(zero_fill_count++);
-
-                                               if (bumped_pagein == TRUE) {
-                                                       VM_STAT(pageins--);
-                                                       current_task()->pageins--;
-                                               }
-                                               vm_object_lock(object);
                                        }
                                        }
-                                       pmap_clear_modify(m->phys_page);
+                                       if (bumped_pagein == TRUE) {
+                                               VM_STAT(pageins--);
+                                               current_task()->pageins--;
+                                       }
                                        vm_page_lock_queues();
                                        VM_PAGE_QUEUES_REMOVE(m);
                                        m->page_ticket = vm_page_ticket;
                                        vm_page_lock_queues();
                                        VM_PAGE_QUEUES_REMOVE(m);
                                        m->page_ticket = vm_page_ticket;
-                                       if(m->object->size > 0x80000) {
+                                       assert(!m->laundry);
+                                       assert(m->object != kernel_object);
+                                       assert(m->pageq.next == NULL &&
+                                              m->pageq.prev == NULL);
+                                       if(m->object->size > 0x200000) {
                                                m->zero_fill = TRUE;
                                                /* depends on the queues lock */
                                                vm_zf_count += 1;
                                                m->zero_fill = TRUE;
                                                /* depends on the queues lock */
                                                vm_zf_count += 1;
@@ -1064,7 +1228,7 @@ no_clustering:
                                 * do not need to take the map lock.
                                 */
                                cluster_end = offset + PAGE_SIZE_64;
                                 * do not need to take the map lock.
                                 */
                                cluster_end = offset + PAGE_SIZE_64;
-                               tws_build_cluster((tws_hash_t)
+                               tws_build_cluster(
                                        current_task()->dynamic_working_set,
                                        object, &cluster_start,
                                        &cluster_end, 0x40000);
                                        current_task()->dynamic_working_set,
                                        object, &cluster_start,
                                        &cluster_end, 0x40000);
@@ -1084,7 +1248,7 @@ no_clustering:
                         */
 
                        if (type_of_fault)
                         */
 
                        if (type_of_fault)
-                               *type_of_fault = (length << 8) | DBG_PAGEIN_FAULT;
+                               *type_of_fault = ((int)length << 8) | DBG_PAGEIN_FAULT;
                        VM_STAT(pageins++);
                        current_task()->pageins++;
                        bumped_pagein = TRUE;
                        VM_STAT(pageins++);
                        current_task()->pageins++;
                        bumped_pagein = TRUE;
@@ -1125,7 +1289,7 @@ no_clustering:
                        if (rc != KERN_SUCCESS) {
                                if (rc != MACH_SEND_INTERRUPTED
                                    && vm_fault_debug)
                        if (rc != KERN_SUCCESS) {
                                if (rc != MACH_SEND_INTERRUPTED
                                    && vm_fault_debug)
-                                       printf("%s(0x%x, 0x%x, 0x%x, 0x%x) failed, rc=%d\n",
+                                       printf("%s(0x%x, 0x%xll, 0x%xll, 0x%x) failed, rc=%d\n",
                                                "memory_object_data_request",
                                                object->pager,
                                                cluster_start + object->paging_offset, 
                                                "memory_object_data_request",
                                                object->pager,
                                                cluster_start + object->paging_offset, 
@@ -1153,44 +1317,8 @@ no_clustering:
                                return((rc == MACH_SEND_INTERRUPTED) ?
                                        VM_FAULT_INTERRUPTED :
                                        VM_FAULT_MEMORY_ERROR);
                                return((rc == MACH_SEND_INTERRUPTED) ?
                                        VM_FAULT_INTERRUPTED :
                                        VM_FAULT_MEMORY_ERROR);
-                       } else {
-#ifdef notdefcdy
-                               tws_hash_line_t line;
-                               task_t          task;
-
-                               task = current_task();
-                               
-                               if((map != NULL) && 
-                                       (task->dynamic_working_set != 0)) 
-                                               && !(object->private)) {
-                                       vm_object_t     base_object;
-                                       vm_object_offset_t base_offset;
-                                       base_object = object;
-                                       base_offset = offset;
-                                       while(base_object->shadow) {
-                                               base_offset +=
-                                                 base_object->shadow_offset;
-                                               base_object =
-                                                 base_object->shadow;
-                                       }
-                                       if(tws_lookup
-                                               ((tws_hash_t)
-                                               task->dynamic_working_set,
-                                               base_offset, base_object,
-                                               &line) == KERN_SUCCESS) {
-                                               tws_line_signal((tws_hash_t)
-                                               task->dynamic_working_set, 
-                                                       map, line, vaddr);
-                                       }
-                               }
-#endif
                        }
                        
                        }
                        
-                       /*
-                        * Retry with same object/offset, since new data may
-                        * be in a different page (i.e., m is meaningless at
-                        * this point).
-                        */
                        vm_object_lock(object);
                        if ((interruptible != THREAD_UNINT) && 
                            (current_thread()->state & TH_ABORT)) {
                        vm_object_lock(object);
                        if ((interruptible != THREAD_UNINT) && 
                            (current_thread()->state & TH_ABORT)) {
@@ -1198,8 +1326,29 @@ no_clustering:
                                thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_INTERRUPTED);
                        }
                                thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_INTERRUPTED);
                        }
-                       if(m == VM_PAGE_NULL)
+                       if (m == VM_PAGE_NULL &&
+                           object->phys_contiguous) {
+                               /*
+                                * No page here means that the object we
+                                * initially looked up was "physically 
+                                * contiguous" (i.e. device memory).  However,
+                                * with Virtual VRAM, the object might not
+                                * be backed by that device memory anymore,
+                                * so we're done here only if the object is
+                                * still "phys_contiguous".
+                                * Otherwise, if the object is no longer
+                                * "phys_contiguous", we need to retry the
+                                * page fault against the object's new backing
+                                * store (different memory object).
+                                */
                                break;
+                       }
+
+                       /*
+                        * Retry with same object/offset, since new data may
+                        * be in a different page (i.e., m is meaningless at
+                        * this point).
+                        */
                        continue;
                }
 
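
The phys_contiguous test added above distinguishes two outcomes when the pager has been asked for data and no page is resident: if the object is still physically contiguous device memory there is nothing more to do, but if it has since been re-backed (the Virtual VRAM case described in the comment) the fault must be retried. A compact, stand-alone model of that decision, with invented names:

#include <stdbool.h>
#include <stdio.h>

enum toy_pager_next { TOY_DONE, TOY_RETRY_SAME_OBJECT };

/*
 * Model of the new check: after the pager returns and no page showed
 * up, we are finished only if the object is still "physically
 * contiguous" device memory; otherwise its backing store changed and
 * the lookup must be retried.
 */
static enum toy_pager_next
toy_after_pager_no_page(bool object_phys_contiguous)
{
    return object_phys_contiguous ? TOY_DONE : TOY_RETRY_SAME_OBJECT;
}

int
main(void)
{
    printf("still device memory -> %s\n",
           toy_after_pager_no_page(true) == TOY_DONE ? "done" : "retry");
    printf("re-backed object    -> %s\n",
           toy_after_pager_no_page(false) == TOY_DONE ? "done" : "retry");
    return 0;
}
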
@@ -1247,6 +1396,19 @@ no_clustering:
                        assert(m->object == object);
                        first_m = VM_PAGE_NULL;
 
+                       if(m == VM_PAGE_NULL) {
+                               m = vm_page_grab();
+                               if (m == VM_PAGE_NULL) {
+                                       vm_fault_cleanup(
+                                               object, VM_PAGE_NULL);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_MEMORY_SHORTAGE);
+                               }
+                               vm_page_insert(
+                                       m, object, offset);
+                       }
+
                        if (object->shadow_severed) {
                                VM_PAGE_FREE(m);
                                vm_fault_cleanup(object, VM_PAGE_NULL);
@@ -1254,6 +1416,27 @@ no_clustering:
                                return VM_FAULT_MEMORY_ERROR;
                        }
 
+                       /*
+                        * Are we protecting the system from
+                        * backing store exhaustion?  If so,
+                        * sleep unless we are privileged.
+                        */
+
+                       if(vm_backing_store_low) {
+                               if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                       assert_wait((event_t) 
+                                               &vm_backing_store_low, 
+                                               THREAD_UNINT);
+                                       VM_PAGE_FREE(m);
+                                       vm_fault_cleanup(object, VM_PAGE_NULL);
+                                       thread_block(THREAD_CONTINUE_NULL);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_RETRY);
+                               }
+                       }
+
                        if (VM_PAGE_THROTTLED() ||
                            (m->fictitious && !vm_page_convert(m))) {
                                VM_PAGE_FREE(m);
@@ -1266,19 +1449,23 @@ no_clustering:
                        if (!no_zero_fill) {
                                vm_object_unlock(object);
                                vm_page_zero_fill(m);
+                               vm_object_lock(object);
+
                                if (type_of_fault)
                                        *type_of_fault = DBG_ZERO_FILL_FAULT;
                                VM_STAT(zero_fill_count++);
-
-                               if (bumped_pagein == TRUE) {
-                                       VM_STAT(pageins--);
-                                       current_task()->pageins--;
-                               }
-                               vm_object_lock(object);
+                       }
+                       if (bumped_pagein == TRUE) {
+                               VM_STAT(pageins--);
+                               current_task()->pageins--;
                        }
                        vm_page_lock_queues();
                        VM_PAGE_QUEUES_REMOVE(m);
-                       if(m->object->size > 0x80000) {
+                       assert(!m->laundry);
+                       assert(m->object != kernel_object);
+                       assert(m->pageq.next == NULL &&
+                              m->pageq.prev == NULL);
+                       if(m->object->size > 0x200000) {
                                m->zero_fill = TRUE;
                                /* depends on the queues lock */
                                vm_zf_count += 1;
@@ -1302,7 +1489,9 @@ no_clustering:
                        m->inactive = TRUE;
                        vm_page_inactive_count++;
                        vm_page_unlock_queues();
+#if 0
                        pmap_clear_modify(m->phys_page);
+#endif
                        break;
                }
                else {
@@ -1349,6 +1538,15 @@ no_clustering:
        }
 #endif /* EXTRA_ASSERTIONS */
 
+       /*
+        * ENCRYPTED SWAP:
+        * If we found a page, we must have decrypted it before we
+        * get here...
+        */
+       if (m != VM_PAGE_NULL) {
+               ASSERT_PAGE_DECRYPTED(m);
+       }
+
        XPR(XPR_VM_FAULT,
        "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
                (integer_t)object, offset, (integer_t)m,
@@ -1374,6 +1572,27 @@ no_clustering:
 
                        assert(!must_be_resident);
 
+                       /*
+                        * Are we protecting the system from
+                        * backing store exhaustion?  If so,
+                        * sleep unless we are privileged.
+                        */
+
+                       if(vm_backing_store_low) {
+                               if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                       assert_wait((event_t) 
+                                               &vm_backing_store_low, 
+                                               THREAD_UNINT);
+                                       RELEASE_PAGE(m);
+                                       vm_fault_cleanup(object, first_m);
+                                       thread_block(THREAD_CONTINUE_NULL);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_RETRY);
+                               }
+                       }
+
                        /*
                         *      If we try to collapse first_object at this
                         *      point, we may deadlock when we try to get
@@ -1418,12 +1637,12 @@ no_clustering:
                         *
                         *      XXXO If we know that only one map has
                         *      access to this page, then we could
-                        *      avoid the pmap_page_protect() call.
+                        *      avoid the pmap_disconnect() call.
                         */
 
                        vm_page_lock_queues();
                        assert(!m->cleaning);
-                       pmap_page_protect(m->phys_page, VM_PROT_NONE);
+                       pmap_disconnect(m->phys_page);
                        vm_page_deactivate(m);
                        copy_m->dirty = TRUE;
                        /*
@@ -1464,7 +1683,7 @@ no_clustering:
                         */     
 
                        vm_object_paging_end(object); 
-                       vm_object_collapse(object);
+                       vm_object_collapse(object, offset, TRUE);
                        vm_object_paging_begin(object);
 
                }
@@ -1559,6 +1778,12 @@ no_clustering:
                                copy_object->ref_count--;
                                assert(copy_object->ref_count > 0);
                                copy_m = vm_page_lookup(copy_object, copy_offset);
+                               /*
+                                * ENCRYPTED SWAP:
+                                * it's OK if the "copy_m" page is encrypted,
+                                * because we're not moving it nor handling its
+                                * contents.
+                                */
                                if (copy_m != VM_PAGE_NULL && copy_m->busy) {
                                        PAGE_ASSERT_WAIT(copy_m, interruptible);
                                        vm_object_unlock(copy_object);
@@ -1584,6 +1809,31 @@ no_clustering:
                         * We must copy the page to the copy object.
                         */
 
+                       /*
+                        * Are we protecting the system from
+                        * backing store exhaustion?  If so,
+                        * sleep unless we are privileged.
+                        */
+
+                       if(vm_backing_store_low) {
+                               if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                       assert_wait((event_t) 
+                                               &vm_backing_store_low, 
+                                               THREAD_UNINT);
+                                       RELEASE_PAGE(m);
+                                       VM_OBJ_RES_DECR(copy_object);
+                                       copy_object->ref_count--;
+                                       assert(copy_object->ref_count > 0);
+                                       vm_object_unlock(copy_object);
+                                       vm_fault_cleanup(object, first_m);
+                                       thread_block(THREAD_CONTINUE_NULL);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_RETRY);
+                               }
+                       }
+
                        /*
                         *      Allocate a page for the copy
                         */
@@ -1614,7 +1864,7 @@ no_clustering:
 
                        vm_page_lock_queues();
                        assert(!m->cleaning);
-                       pmap_page_protect(m->phys_page, VM_PROT_NONE);
+                       pmap_disconnect(m->phys_page);
                        copy_m->dirty = TRUE;
                        vm_page_unlock_queues();
 
@@ -1729,37 +1979,22 @@ no_clustering:
         *      mark read-only data as dirty.]
         */
 
+
+       if(m != VM_PAGE_NULL) {
 #if    !VM_FAULT_STATIC_CONFIG
-       if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE) && 
-                       (m != VM_PAGE_NULL)) {
-               m->dirty = TRUE;
-       }
-#endif
-#if TRACEFAULTPAGE
-       dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_page_deactivate_behind);  /* (TEST/DEBUG) */
-#endif
-       if (vm_page_deactivate_behind) {
-               if (offset && /* don't underflow */
-                       (object->last_alloc == (offset - PAGE_SIZE_64))) {
-                       m = vm_page_lookup(object, object->last_alloc);
-                       if ((m != VM_PAGE_NULL) && !m->busy) {
-                               vm_page_lock_queues();
-                               vm_page_deactivate(m);
-                               vm_page_unlock_queues();
-                       }
-#if TRACEFAULTPAGE
-                       dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
+               if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
+                       m->dirty = TRUE;
 #endif
-               }
-               object->last_alloc = offset;
+               if (vm_page_deactivate_behind)
+                       vm_fault_deactivate_behind(object, offset, behavior);
+       } else {
+               vm_object_unlock(object);
        }
+       thread_interrupt_level(interruptible_state);
+
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);       /* (TEST/DEBUG) */
 #endif
-       thread_interrupt_level(interruptible_state);
-       if(*result_page == VM_PAGE_NULL) {
-               vm_object_unlock(object);
-       }
        return(VM_FAULT_SUCCESS);
 
 #if 0
@@ -1779,6 +2014,96 @@ no_clustering:
 #undef RELEASE_PAGE
 }
 
+/*
+ *     Routine:        vm_fault_tws_insert
+ *     Purpose:
+ *             Add fault information to the task working set.
+ *     Implementation:
+ *             We always insert the base object/offset pair
+ *             rather than the actual object/offset.
+ *     Assumptions:
+ *             Map and real_map locked.
+ *             Object locked and referenced.
+ *     Returns:
+ *             TRUE if startup file should be written.
+ *             With object locked and still referenced.
+ *             But we may drop the object lock temporarily.
+ */
+static boolean_t
+vm_fault_tws_insert(
+       vm_map_t map,
+       vm_map_t real_map,
+       vm_map_offset_t vaddr,
+       vm_object_t object,
+       vm_object_offset_t offset)
+{
+       tws_hash_line_t line;
+       task_t          task;
+       kern_return_t   kr;
+       boolean_t       result = FALSE;
+
+       /* Avoid possible map lock deadlock issues */
+       if (map == kernel_map || map == kalloc_map ||
+           real_map == kernel_map || real_map == kalloc_map)
+               return result;
+
+       task = current_task();
+       if (task->dynamic_working_set != 0) {
+               vm_object_t     base_object;
+               vm_object_t     base_shadow;
+               vm_object_offset_t base_offset;
+               base_object = object;
+               base_offset = offset;
+               while ((base_shadow = base_object->shadow)) {
+                       vm_object_lock(base_shadow);
+                       vm_object_unlock(base_object);
+                       base_offset +=
+                               base_object->shadow_offset;
+                       base_object = base_shadow;
+               }
+               kr = tws_lookup(
+                       task->dynamic_working_set,
+                       base_offset, base_object, 
+                       &line);
+               if (kr == KERN_OPERATION_TIMED_OUT){
+                       result = TRUE;
+                       if (base_object != object) {
+                               vm_object_unlock(base_object);
+                               vm_object_lock(object);
+                       }
+               } else if (kr != KERN_SUCCESS) {
+                       if(base_object != object)
+                               vm_object_reference_locked(base_object);
+                       kr = tws_insert(
+                                  task->dynamic_working_set,
+                                  base_offset, base_object,
+                                  vaddr, real_map);
+                       if(base_object != object) {
+                               vm_object_unlock(base_object);
+                               vm_object_deallocate(base_object);
+                       }
+                       if(kr == KERN_NO_SPACE) {
+                               if (base_object == object)
+                                       vm_object_unlock(object);
+                               tws_expand_working_set(
+                                  task->dynamic_working_set, 
+                                  TWS_HASH_LINE_COUNT,
+                                  FALSE);
+                               if (base_object == object)
+                                       vm_object_lock(object);
+                       } else if(kr == KERN_OPERATION_TIMED_OUT) {
+                               result = TRUE;
+                       }
+                       if(base_object != object)
+                               vm_object_lock(object);
+               } else if (base_object != object) {
+                       vm_object_unlock(base_object);
+                       vm_object_lock(object);
+               }
+       }
+       return result;
+}
+
 /*
  *     Routine:        vm_fault
  *     Purpose:
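
Before consulting the working set, the new vm_fault_tws_insert() (added in the previous hunk) rewrites the (object, offset) pair in terms of the base of the shadow chain, following shadow links and accumulating each shadow_offset while taking the object locks hand over hand. The stand-alone sketch below models only the offset translation, with toy types and no locking; it is not from the xnu sources.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_object {
    struct toy_object *shadow;         /* object this one shadows, or NULL */
    uint64_t           shadow_offset;  /* where this object maps into it */
};

/*
 * Translate (obj, offset) into the equivalent (base object, offset)
 * by walking to the bottom of the shadow chain.
 */
static struct toy_object *
toy_to_base(struct toy_object *obj, uint64_t *offset)
{
    while (obj->shadow != NULL) {
        *offset += obj->shadow_offset;
        obj = obj->shadow;
    }
    return obj;
}

int
main(void)
{
    struct toy_object base = { NULL, 0 };
    struct toy_object mid  = { &base, 0x10000 };
    struct toy_object top  = { &mid,  0x4000 };

    uint64_t off = 0x2000;
    struct toy_object *b = toy_to_base(&top, &off);
    printf("base=%p offset=0x%llx\n", (void *)b, (unsigned long long)off);
    return 0;
}
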
@@ -1793,15 +2118,17 @@ no_clustering:
  *             and deallocated when leaving vm_fault.
  */
 
+extern int _map_enter_debug;
+
 kern_return_t
 vm_fault(
        vm_map_t        map,
-       vm_offset_t     vaddr,
+       vm_map_offset_t vaddr,
        vm_prot_t       fault_type,
        boolean_t       change_wiring,
        int             interruptible,
        pmap_t          caller_pmap,
-       vm_offset_t     caller_pmap_addr)
+       vm_map_offset_t caller_pmap_addr)
 {
        vm_map_version_t        version;        /* Map version for verificiation */
        boolean_t               wired;          /* Should mapping be wired down? */
@@ -1809,7 +2136,7 @@ vm_fault(
        vm_object_offset_t      offset;         /* Top-level offset */
        vm_prot_t               prot;           /* Protection for mapping */
        vm_behavior_t           behavior;       /* Expected paging behavior */
-       vm_object_offset_t      lo_offset, hi_offset;
+       vm_map_offset_t         lo_offset, hi_offset;
        vm_object_t             old_copy_object; /* Saved copy object */
        vm_page_t               result_page;    /* Result of vm_fault_page */
        vm_page_t               top_page;       /* Placeholder page */
@@ -1817,7 +2144,7 @@ vm_fault(
 
        register
        vm_page_t               m;      /* Fast access to result_page */
-       kern_return_t           error_code;     /* page error reasons */
+       kern_return_t           error_code = 0; /* page error reasons */
        register
        vm_object_t             cur_object;
        register
@@ -1825,17 +2152,15 @@ vm_fault(
        vm_page_t               cur_m;
        vm_object_t             new_object;
        int                     type_of_fault;
-       vm_map_t                pmap_map = map;
+       vm_map_t                real_map = map;
        vm_map_t                original_map = map;
        pmap_t                  pmap = NULL;
-       boolean_t               funnel_set = FALSE;
-       funnel_t                *curflock;
-       thread_t                cur_thread;
        boolean_t               interruptible_state;
        unsigned int            cache_attr;
        int                     write_startup_file = 0;
-       vm_prot_t               full_fault_type;
-       
+       boolean_t               need_activation;
+       vm_prot_t               original_fault_type;
+
 
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START,
                              vaddr,
@@ -1844,13 +2169,15 @@ vm_fault(
                              0,
                              0);
 
-       /* at present we do not fully check for execute permission */
-       /* we generally treat it is read except in certain device  */
-       /* memory settings */
-       full_fault_type = fault_type;
-       if(fault_type & VM_PROT_EXECUTE) {
-               fault_type &= ~VM_PROT_EXECUTE;
-               fault_type |= VM_PROT_READ;
+       if (get_preemption_level() != 0) {
+               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
+                                     vaddr,
+                                     0,
+                                     KERN_FAILURE,
+                                     0,
+                                     0);
+
+               return (KERN_FAILURE);
        }
 
        interruptible_state = thread_interrupt_level(interruptible);
@@ -1865,31 +2192,25 @@ vm_fault(
        VM_STAT(faults++);
        current_task()->faults++;
 
-       /*
-        * drop funnel if it is already held. Then restore while returning
-        */
-       cur_thread = current_thread();
+       original_fault_type = fault_type;
 
-       if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
-               funnel_set = TRUE;
-               curflock = cur_thread->funnel_lock;
-               thread_funnel_set( curflock , FALSE);
-       }
-         
     RetryFault: ;
 
        /*
         *      Find the backing store object and offset into
         *      it to begin the search.
         */
+       fault_type = original_fault_type;
        map = original_map;
        vm_map_lock_read(map);
        kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version,
                                &object, &offset,
                                &prot, &wired,
-                               &behavior, &lo_offset, &hi_offset, &pmap_map);
+                               &behavior, &lo_offset, &hi_offset, &real_map);
 
-       pmap = pmap_map->pmap;
+//if (_map_enter_debug)printf("vm_map_lookup_locked(map=0x%x, addr=0x%llx, prot=%d wired=%d) = %d\n", map, vaddr, prot, wired, kr);
+
+       pmap = real_map->pmap;
 
        if (kr != KERN_SUCCESS) {
                vm_map_unlock_read(map);
@@ -1957,15 +2278,15 @@ vm_fault(
        while (TRUE) {
                m = vm_page_lookup(cur_object, cur_offset);
                if (m != VM_PAGE_NULL) {
-                       if (m->busy) {
+                       if (m->busy) {
                                wait_result_t   result;
 
                                if (object != cur_object)
                                        vm_object_unlock(object);
 
                                vm_map_unlock_read(map);
-                               if (pmap_map != map)
-                                       vm_map_unlock(pmap_map);
+                               if (real_map != map)
+                                       vm_map_unlock(real_map);
 
 #if    !VM_FAULT_STATIC_CONFIG
                                if (!vm_fault_interruptible)
@@ -1995,6 +2316,38 @@ vm_fault(
                                break;
                        }
 
+                       if (m->encrypted) {
+                               /*
+                                * ENCRYPTED SWAP:
+                                * We've soft-faulted (because it's not in the page
+                                * table) on an encrypted page.
+                                * Keep the page "busy" so that no one messes with
+                                * it during the decryption.
+                                * Release the extra locks we're holding, keep only
+                                * the page's VM object lock.
+                                */
+                               m->busy = TRUE;
+                               if (object != cur_object) {
+                                       vm_object_unlock(object);
+                               }
+                               vm_map_unlock_read(map);
+                               if (real_map != map) 
+                                       vm_map_unlock(real_map);
+
+                               vm_page_decrypt(m, 0);
+
+                               assert(m->busy);
+                               PAGE_WAKEUP_DONE(m);
+                               vm_object_unlock(m->object);
+
+                               /*
+                                * Retry from the top, in case anything
+                                * changed while we were decrypting...
+                                */
+                               goto RetryFault;
+                       }
+                       ASSERT_PAGE_DECRYPTED(m);
+
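/*
 * Illustrative sketch, not from the xnu sources: the shape of the
 * encrypted-swap handling above -- mark the page busy, drop the higher-level
 * locks, do the slow decryption, wake waiters, then retry the fault from the
 * top.  Every name below is a hypothetical stand-in, not a kernel symbol.
 */
#include <stdbool.h>

struct sketch_page { bool busy; bool encrypted; };

static void sketch_unlock_maps(void)              { /* drop map/object locks here */ }
static void sketch_decrypt(struct sketch_page *p) { p->encrypted = false; /* may sleep */ }
static void sketch_wakeup(struct sketch_page *p)  { p->busy = false; }

static void
sketch_soft_fault(struct sketch_page *p)
{
retry:
	if (p->encrypted) {
		p->busy = true;		/* keep everyone else off the page        */
		sketch_unlock_maps();	/* never sleep while holding the map locks */
		sketch_decrypt(p);
		sketch_wakeup(p);	/* PAGE_WAKEUP_DONE() equivalent           */
		goto retry;		/* the world may have changed; start over  */
	}
}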
                        /*
                         *      Two cases of map in faults:
                         *          - At top level w/o copy object.
@@ -2007,6 +2360,7 @@ vm_fault(
                                goto FastMapInFault;
 
                        if ((fault_type & VM_PROT_WRITE) == 0) {
+                               boolean_t sequential;
 
                                prot &= ~VM_PROT_WRITE;
 
 
@@ -2024,8 +2378,6 @@ vm_fault(
 FastMapInFault:
                                m->busy = TRUE;
 
-                               vm_object_paging_begin(object);
-
 FastPmapEnter:
                                /*
                                 *      Check a couple of global reasons to
@@ -2047,10 +2399,31 @@ FastPmapEnter:
 #endif /* MACH_KDB */
 #endif /* STATIC_CONFIG */
                                cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
-                               if ((m->no_isync == TRUE) || 
-                                          (cache_attr != VM_WIMG_DEFAULT)) {
-                                       pmap_sync_caches_phys(m->phys_page);
+
+                               sequential = FALSE;
+                               need_activation = FALSE;
+
+                               if (m->no_isync == TRUE) {
                                        m->no_isync = FALSE;
+                                       pmap_sync_page_data_phys(m->phys_page);
+
+                                       if ((type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
+                                               /*
+                                                * found it in the cache, but this
+                                                * is the first fault-in of the page (no_isync == TRUE)
+                                                * so it must have come in as part of
+                                                * a cluster... account 1 pagein against it
+                                                */
+                                               VM_STAT(pageins++);
+                                               current_task()->pageins++;
+                                               type_of_fault = DBG_PAGEIN_FAULT;
+                                               sequential = TRUE;
+                                       }
+                                       if (m->clustered)
+                                               need_activation = TRUE;
+
+                               } else if (cache_attr != VM_WIMG_DEFAULT) {
+                                       pmap_sync_page_attributes_phys(m->phys_page);
                                }
 
                                if(caller_pmap) {
@@ -2063,7 +2436,7 @@ FastPmapEnter:
                                }
 
                                /*
-                                *      Grab the queues lock to manipulate
+                                *      Hold queues lock to manipulate
                                 *      the page queues.  Change wiring
                                 *      case is obvious.  In soft ref bits
                                 *      case activate page only if it fell
@@ -2074,101 +2447,60 @@ FastPmapEnter:
                                 *      move active page to back of active
                                 *      queue.  This code doesn't.
                                 */
-                               vm_page_lock_queues();
-
                                if (m->clustered) {
                                        vm_pagein_cluster_used++;
                                        m->clustered = FALSE;
                                }
-                               m->reference = TRUE;
-
                                if (change_wiring) {
+                                       vm_page_lock_queues();
+
                                        if (wired)
                                                vm_page_wire(m);
                                        else
                                                vm_page_unwire(m);
+
+                                       vm_page_unlock_queues();
                                }
-#if VM_FAULT_STATIC_CONFIG
                                else {
-                                       if (!m->active && !m->inactive)
+                                       if ((!m->active && !m->inactive) || ((need_activation == TRUE) && !m->active)) {
+                                               vm_page_lock_queues();
                                                vm_page_activate(m);
+                                               vm_page_unlock_queues();
+                                       }
                                }
-#else                          
-                               else if (software_reference_bits) {
-                                       if (!m->active && !m->inactive)
-                                               vm_page_activate(m);
-                               }
-                               else if (!m->active) {
-                                       vm_page_activate(m);
-                               }
-#endif
-                               vm_page_unlock_queues();
 
                                /*
                                 *      That's it, clean up and return.
                                 */
                                PAGE_WAKEUP_DONE(m);
 
-                               vm_object_paging_end(object);
 
 
-                               {
-                                  tws_hash_line_t      line;
-                                  task_t               task;
-
-                                  task = current_task();
-                                  if((map != NULL) && 
-                                       (task->dynamic_working_set != 0) &&
-                                               !(object->private)) {
-                                       kern_return_t   kr;
-                                       vm_object_t     base_object;
-                                       vm_object_offset_t base_offset;
-                                       base_object = object;
-                                       base_offset = cur_offset;
-                                       while(base_object->shadow) {
-                                               base_offset +=
-                                                base_object->shadow_offset;
-                                               base_object = 
-                                                base_object->shadow;
-                                       }
-                                       kr = tws_lookup((tws_hash_t)
-                                               task->dynamic_working_set,
-                                               base_offset, base_object,
-                                               &line);
-                                       if(kr == KERN_OPERATION_TIMED_OUT){
-                                               write_startup_file = 1;
-                                       } else if (kr != KERN_SUCCESS) {
-                                               kr = tws_insert((tws_hash_t)
-                                                  task->dynamic_working_set,
-                                                  base_offset, base_object,
-                                                  vaddr, pmap_map);
-                                               if(kr == KERN_NO_SPACE) {
-                                                 vm_object_unlock(object);
-
-                                                  tws_expand_working_set(
-                                                     task->dynamic_working_set,
-                                                     TWS_HASH_LINE_COUNT,
-                                                     FALSE);
-
-                                                  vm_object_lock(object);
-                                               }
-                                               if(kr == 
-                                                  KERN_OPERATION_TIMED_OUT) {
-                                                       write_startup_file = 1;
-                                               }
-                                       }
-                                  }
+                               sequential = (sequential && vm_page_deactivate_behind) ?
+                                       vm_fault_deactivate_behind(object, cur_offset, behavior) :
+                                       FALSE;
+
+                               /* 
+                                * Add non-sequential pages to the working set.
+                                * The sequential pages will be brought in through
+                                * normal clustering behavior.
+                                */
+                               if (!sequential && !object->private) {
+                                       vm_object_paging_begin(object);
+
+                                       write_startup_file = 
+                                               vm_fault_tws_insert(map, real_map, vaddr, 
+                                                               object, cur_offset);
+
+                                       vm_object_paging_end(object);
                                }
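/*
 * Illustrative sketch, not from the xnu sources: what a working-set insert
 * such as the vm_fault_tws_insert() call above has to do first, and what the
 * removed inline tws_lookup()/tws_insert() code did -- translate the offset
 * down the shadow chain so the entry is keyed on the backing (base) object.
 * Types and names below are stand-ins.
 */
struct sketch_object {
	struct sketch_object	*shadow;
	unsigned long long	shadow_offset;
};

static void
sketch_find_base(struct sketch_object *obj, unsigned long long offset,
		 struct sketch_object **base_obj, unsigned long long *base_offset)
{
	while (obj->shadow) {
		offset += obj->shadow_offset;	/* step down into the shadow */
		obj = obj->shadow;
	}
	*base_obj = obj;
	*base_offset = offset;
}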
                                vm_object_unlock(object);
 
                                vm_map_unlock_read(map);
-                               if(pmap_map != map)
-                                       vm_map_unlock(pmap_map);
+                               if(real_map != map)
+                                       vm_map_unlock(real_map);
 
                                if(write_startup_file)
                                        tws_send_startup_info(current_task());
 
 
-                               if (funnel_set)
-                                       thread_funnel_set( curflock, TRUE);
-
                                thread_interrupt_level(interruptible_state);
 
 
@@ -2231,15 +2563,13 @@ FastPmapEnter:
                         *      Now cope with the source page and object
                         *      If the top object has a ref count of 1
                         *      then no other map can access it, and hence
-                        *      it's not necessary to do the pmap_page_protect.
+                        *      it's not necessary to do the pmap_disconnect.
                         */
 
-
                        vm_page_lock_queues();
                        vm_page_deactivate(cur_m);
                        m->dirty = TRUE;
-                       pmap_page_protect(cur_m->phys_page,
-                                                 VM_PROT_NONE);
+                       pmap_disconnect(cur_m->phys_page);
                        vm_page_unlock_queues();
 
                        PAGE_WAKEUP_DONE(cur_m);
@@ -2253,8 +2583,7 @@ FastPmapEnter:
                         */     
  
                        vm_object_paging_end(object); 
-                       vm_object_collapse(object);
-                       vm_object_paging_begin(object);
+                       vm_object_collapse(object, offset, TRUE);
 
                        goto FastPmapEnter;
                }
 
@@ -2279,20 +2608,16 @@ FastPmapEnter:
                                        vm_object_paging_end(object);
                                        vm_object_unlock(object);
                                        vm_map_unlock_read(map);
-                                       if(pmap_map != map)
-                                               vm_map_unlock(pmap_map);
+                                       if(real_map != map)
+                                               vm_map_unlock(real_map);
 
                                        if(write_startup_file)
                                                tws_send_startup_info(
                                                                current_task());
 
 
-                                       if (funnel_set) {
-                                               thread_funnel_set( curflock, TRUE);
-                                               funnel_set = FALSE;
-                                       }
                                        thread_interrupt_level(interruptible_state);
 
-                                       return VM_FAULT_MEMORY_ERROR;
+                                       return KERN_MEMORY_ERROR;
                                }
 
                                /*
@@ -2301,9 +2626,18 @@ FastPmapEnter:
                                 *      page, then drop any lower lock.
                                 *      Give up if no page.
                                 */
-                               if ((vm_page_free_target - 
-                                  ((vm_page_free_target-vm_page_free_min)>>2))
-                                               > vm_page_free_count) {
+                               if (VM_PAGE_THROTTLED()) {
+                                       break;
+                               }
+
+                               /*
+                                * are we protecting the system from
+                                * backing store exhaustion.  If so
+                                * sleep unless we are privileged.
+                                */
+                               if(vm_backing_store_low) {
+                                       if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV))
                                        break;
                                }
                                m = vm_page_alloc(object, offset);
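/*
 * Illustrative sketch, not from the xnu sources: the gate added above.  When
 * backing store is running low, only tasks holding the backing-store
 * privilege may keep allocating zero-fill pages on this fast path; everyone
 * else breaks out to the slow path.  Names are stand-ins, not kernel globals.
 */
static int
sketch_may_zero_fill_fast(int backing_store_low, int task_has_priv)
{
	if (backing_store_low && !task_has_priv)
		return 0;	/* corresponds to the "break" above */
	return 1;
}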
@@ -2326,9 +2660,6 @@ FastPmapEnter:
                                if (cur_object != object)
                                        vm_object_unlock(cur_object);
 
-                               vm_object_paging_begin(object);
-                               vm_object_unlock(object);
-
                                /*
                                 *      Now zero fill page and map it.
                                 *      the page is probably going to 
@@ -2348,7 +2679,11 @@ FastPmapEnter:
                                VM_PAGE_QUEUES_REMOVE(m);
 
                                m->page_ticket = vm_page_ticket;
-                               if(m->object->size > 0x80000) {
+                               assert(!m->laundry);
+                               assert(m->object != kernel_object);
+                               assert(m->pageq.next == NULL &&
+                                      m->pageq.prev == NULL);
+                               if(m->object->size > 0x200000) {
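                                        /* aside, not in the source: 0x200000 bytes = 2 MiB; the previous cutoff, 0x80000, was 512 KiB */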
                                        m->zero_fill = TRUE;
                                        /* depends on the queues lock */
                                        vm_zf_count += 1;
@@ -2373,7 +2708,6 @@ FastPmapEnter:
                                m->inactive = TRUE;
                                vm_page_inactive_count++;
                                vm_page_unlock_queues();
-                               vm_object_lock(object);
 
                                goto FastPmapEnter;
                        }
 
@@ -2403,8 +2737,8 @@ FastPmapEnter:
        }
        vm_map_unlock_read(map);
 
-       if(pmap_map != map)
-               vm_map_unlock(pmap_map);
+       if(real_map != map)
+               vm_map_unlock(real_map);
 
        /*
         *      Make a reference to this object to
 
@@ -2420,54 +2754,12 @@ FastPmapEnter:
        vm_object_paging_begin(object);
 
        XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0);
-       {
-               tws_hash_line_t line;
-               task_t          task;
-               kern_return_t   kr;
-
-                  task = current_task();
-                  if((map != NULL) && 
-                       (task->dynamic_working_set != 0)
-                               && !(object->private)) {
-                       vm_object_t     base_object;
-                       vm_object_offset_t base_offset;
-                       base_object = object;
-                       base_offset = offset;
-                       while(base_object->shadow) {
-                               base_offset +=
-                                base_object->shadow_offset;
-                               base_object =
-                                base_object->shadow;
-                       }
-                       kr = tws_lookup((tws_hash_t)
-                               task->dynamic_working_set,
-                               base_offset, base_object, 
-                               &line);
-                       if(kr == KERN_OPERATION_TIMED_OUT){
-                               write_startup_file = 1;
-                       } else if (kr != KERN_SUCCESS) {
-                               tws_insert((tws_hash_t)
-                                  task->dynamic_working_set,
-                                  base_offset, base_object, 
-                                  vaddr, pmap_map);
-                               kr = tws_insert((tws_hash_t)
-                                          task->dynamic_working_set,
-                                          base_offset, base_object,
-                                          vaddr, pmap_map);
-                               if(kr == KERN_NO_SPACE) {
-                                       vm_object_unlock(object);
-                                       tws_expand_working_set(
-                                          task->dynamic_working_set, 
-                                          TWS_HASH_LINE_COUNT,
-                                          FALSE);
-                                       vm_object_lock(object);
-                               }
-                               if(kr == KERN_OPERATION_TIMED_OUT) {
-                                       write_startup_file = 1;
-                               }
-                       }
-               }
+
+       if (!object->private) {
+               write_startup_file = 
+                       vm_fault_tws_insert(map, real_map, vaddr, object, offset);
        }
+
        kr = vm_fault_page(object, offset, fault_type,
                           (change_wiring && !wired),
                           interruptible,
@@ -2576,8 +2868,8 @@ FastPmapEnter:
                                   fault_type & ~VM_PROT_WRITE, &version,
                                   &retry_object, &retry_offset, &retry_prot,
                                   &wired, &behavior, &lo_offset, &hi_offset,
-                                  &pmap_map);
-               pmap = pmap_map->pmap;
+                                  &real_map);
+               pmap = real_map->pmap;
 
                if (kr != KERN_SUCCESS) {
                        vm_map_unlock_read(map);
 
@@ -2601,8 +2893,8 @@ FastPmapEnter:
                if ((retry_object != object) ||
                    (retry_offset != offset)) {
                        vm_map_unlock_read(map);
-                       if(pmap_map != map)
-                               vm_map_unlock(pmap_map);
+                       if(real_map != map)
+                               vm_map_unlock(real_map);
                        if(m != VM_PAGE_NULL) {
                                RELEASE_PAGE(m);
                                UNLOCK_AND_DEALLOCATE;
@@ -2646,8 +2938,8 @@ FastPmapEnter:
 
        if (wired && (fault_type != (prot|VM_PROT_WRITE))) {
                vm_map_verify_done(map, &version);
 
-               if(pmap_map != map)
-                       vm_map_unlock(pmap_map);
+               if(real_map != map)
+                       vm_map_unlock(real_map);
                if(m != VM_PAGE_NULL) {
                        RELEASE_PAGE(m);
                        UNLOCK_AND_DEALLOCATE;
@@ -2664,13 +2956,29 @@ FastPmapEnter:
         *      the pageout queues.  If the pageout daemon comes
         *      across the page, it will remove it from the queues.
         */
+       need_activation = FALSE;
+
        if (m != VM_PAGE_NULL) {
                if (m->no_isync == TRUE) {
-                       pmap_sync_caches_phys(m->phys_page);
-
+                       pmap_sync_page_data_phys(m->phys_page);
+
+                        if ((type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
+                                /*
+                                 * found it in the cache, but this
+                                 * is the first fault-in of the page (no_isync == TRUE)
+                                 * so it must have come in as part of
+                                 * a cluster... account 1 pagein against it
+                                 */
+                                 VM_STAT(pageins++);
+                                 current_task()->pageins++;
+
+                                 type_of_fault = DBG_PAGEIN_FAULT;
+                        }
+                       if (m->clustered) {
+                               need_activation = TRUE;
+                       }
                        m->no_isync = FALSE;
                }
-
                cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
 
                if(caller_pmap) {
@@ -2681,93 +2989,55 @@ FastPmapEnter:
                        PMAP_ENTER(pmap, vaddr, m, 
                                        prot, cache_attr, wired);
                }
-               {
-                       tws_hash_line_t line;
-                       task_t          task;
-                       kern_return_t   kr;
-
-                          task = current_task();
-                          if((map != NULL) && 
-                               (task->dynamic_working_set != 0)
-                                       && (object->private)) {
-                               vm_object_t     base_object;
-                               vm_object_offset_t      base_offset;
-                               base_object = m->object;
-                               base_offset = m->offset;
-                               while(base_object->shadow) {
-                                  base_offset +=
-                                       base_object->shadow_offset;
-                                  base_object =
-                                       base_object->shadow;
-                               }
-                               kr = tws_lookup((tws_hash_t)
-                                       task->dynamic_working_set,
-                                       base_offset, base_object, &line);
-                               if(kr == KERN_OPERATION_TIMED_OUT){
-                                       write_startup_file = 1;
-                               } else if (kr != KERN_SUCCESS) {
-                                       tws_insert((tws_hash_t)
-                                          task->dynamic_working_set,
-                                          base_offset, base_object, 
-                                          vaddr, pmap_map);
-                                       kr = tws_insert((tws_hash_t)
-                                                  task->dynamic_working_set,
-                                                  base_offset, base_object,
-                                                  vaddr, pmap_map);
-                                       if(kr == KERN_NO_SPACE) {
-                                               vm_object_unlock(m->object);
-                                               tws_expand_working_set(
-                                                  task->dynamic_working_set, 
-                                                  TWS_HASH_LINE_COUNT,
-                                                  FALSE);
-                                               vm_object_lock(m->object);
-                                       }
-                                       if(kr == KERN_OPERATION_TIMED_OUT) {
-                                               write_startup_file = 1;
-                                       }
-                               }
-                       }
+
+               /*
+                * Add working set information for private objects here.
+                */
+               if (m->object->private) {
+                       write_startup_file =
+                               vm_fault_tws_insert(map, real_map, vaddr, 
+                                           m->object, m->offset);
                }
        } else {
 
-#ifndef i386
-               int                     memattr;
                vm_map_entry_t          entry;
-               vm_offset_t             laddr;
-               vm_offset_t             ldelta, hdelta;
+               vm_map_offset_t         laddr;
+               vm_map_offset_t         ldelta, hdelta;
 
                /* 
                 * do a pmap block mapping from the physical address
                 * in the object 
                 */
 
 
+#ifndef i386
                /* While we do not worry about execution protection in   */
                /* general, certain pages may have instruction execution */
                /* disallowed.  We will check here, and if not allowed   */
                /* to execute, we return with a protection failure.      */
 
-               if((full_fault_type & VM_PROT_EXECUTE) &&
-                       (pmap_canExecute((ppnum_t)
-                               (object->shadow_offset >> 12)) < 1)) {
+               if((fault_type & VM_PROT_EXECUTE) &&
+                       (!pmap_eligible_for_execute((ppnum_t)
+                               (object->shadow_offset >> 12)))) {
 
                        vm_map_verify_done(map, &version);
 
-                       if(pmap_map != map)
-                               vm_map_unlock(pmap_map);
+                       if(real_map != map)
+                               vm_map_unlock(real_map);
                        vm_fault_cleanup(object, top_page);
                        vm_object_deallocate(object);
                        kr = KERN_PROTECTION_FAILURE;
                        goto done;
                }
+#endif /* !i386 */
 
 
-               if(pmap_map != map) {
-                       vm_map_unlock(pmap_map);
+               if(real_map != map) {
+                       vm_map_unlock(real_map);
                }
                if (original_map != map) {
                        vm_map_unlock_read(map);
                        vm_map_lock_read(original_map);
                        map = original_map;
                }
-               pmap_map = map;
+               real_map = map;
 
                laddr = vaddr;
                hdelta = 0xFFFFF000;
 
@@ -2784,11 +3054,11 @@ FastPmapEnter:
                                laddr = (laddr - entry->vme_start) 
                                                        + entry->offset;
                                vm_map_lock_read(entry->object.sub_map);
-                               if(map != pmap_map)
+                               if(map != real_map)
                                        vm_map_unlock_read(map);
                                if(entry->use_pmap) {
-                                       vm_map_unlock_read(pmap_map);
-                                       pmap_map = entry->object.sub_map;
+                                       vm_map_unlock_read(real_map);
+                                       real_map = entry->object.sub_map;
                                }
                                map = entry->object.sub_map;
                                
@@ -2798,45 +3068,38 @@ FastPmapEnter:
                }
 
                if(vm_map_lookup_entry(map, laddr, &entry) && 
-                                       (entry->object.vm_object != NULL) &&
-                                       (entry->object.vm_object == object)) {
+                  (entry->object.vm_object != NULL) &&
+                  (entry->object.vm_object == object)) {
 
 
+                       vm_map_offset_t phys_offset;
 
 
+                       phys_offset = (entry->object.vm_object->shadow_offset
+                                      + entry->offset
+                                      + laddr
+                                      - entry->vme_start);
+                       phys_offset -= ldelta;
                        if(caller_pmap) {
                                /* Set up a block mapped area */
-                               pmap_map_block(caller_pmap, 
+                               pmap_map_block(
+                                       caller_pmap, 
                                        (addr64_t)(caller_pmap_addr - ldelta), 
-                                       (((vm_offset_t)
-                                   (entry->object.vm_object->shadow_offset)) 
-                                       + entry->offset + 
-                                       (laddr - entry->vme_start) 
-                                                       - ldelta)>>12,
-                               ldelta + hdelta, prot, 
-                               (VM_WIMG_MASK & (int)object->wimg_bits), 0);
+                                       phys_offset >> 12,
+                                       (ldelta + hdelta) >> 12,
+                                       prot,
+                                       (VM_WIMG_MASK & (int)object->wimg_bits),
+                                       0);
                        } else { 
                                /* Set up a block mapped area */
-                               pmap_map_block(pmap_map->pmap, 
-                                  (addr64_t)(vaddr - ldelta),
-                                  (((vm_offset_t)
-                                   (entry->object.vm_object->shadow_offset)) 
-                                      + entry->offset + 
-                                      (laddr - entry->vme_start) - ldelta)>>12,
-                                  ldelta + hdelta, prot, 
-                                  (VM_WIMG_MASK & (int)object->wimg_bits), 0);
+                               pmap_map_block(
+                                       real_map->pmap, 
+                                       (addr64_t)(vaddr - ldelta), 
+                                       phys_offset >> 12,
+                                       (ldelta + hdelta) >> 12,
+                                       prot,
+                                       (VM_WIMG_MASK & (int)object->wimg_bits),
+                                       0);
                        }
                }
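		/*
		 * Aside, not part of the source: the ">> 12" above converts byte
		 * offsets and lengths into 4 KiB (0x1000-byte) page units before
		 * they are handed to pmap_map_block(), e.g. 0x12345000 >> 12 == 0x12345.
		 */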
-#else
-#ifdef notyet
-               if(caller_pmap) {
-                               pmap_enter(caller_pmap, caller_pmap_addr, 
-                               object->shadow_offset>>12, prot, 0, TRUE);
-               } else {
-                               pmap_enter(pmap, vaddr, 
-                               object->shadow_offset>>12, prot, 0, TRUE);
-               }
-                       /* Map it in */
-#endif
-#endif
 
        }
 
 
@@ -2848,6 +3111,12 @@ FastPmapEnter:
        if(m != VM_PAGE_NULL) {
                vm_page_lock_queues();
 
+               if (m->clustered) {
+                       vm_pagein_cluster_used++;
+                       m->clustered = FALSE;
+               }
+               m->reference = TRUE;
+
                if (change_wiring) {
                        if (wired)
                                vm_page_wire(m);
@@ -2856,9 +3125,8 @@ FastPmapEnter:
                }
 #if    VM_FAULT_STATIC_CONFIG
                else {
-                       if (!m->active && !m->inactive)
+                       if ((!m->active && !m->inactive) || ((need_activation == TRUE) && !m->active))
                                vm_page_activate(m);
-                       m->reference = TRUE;
                }
 #else
                else if (software_reference_bits) {
@@ -2877,8 +3145,8 @@ FastPmapEnter:
         */
 
        vm_map_verify_done(map, &version);
-       if(pmap_map != map)
-               vm_map_unlock(pmap_map);
+       if(real_map != map)
+               vm_map_unlock(real_map);
        if(m != VM_PAGE_NULL) {
                PAGE_WAKEUP_DONE(m);
                UNLOCK_AND_DEALLOCATE;
@@ -2894,10 +3162,7 @@ FastPmapEnter:
     done:
        if(write_startup_file)
                tws_send_startup_info(current_task());
-       if (funnel_set) {
-               thread_funnel_set( curflock, TRUE);
-               funnel_set = FALSE;
-       }
+
        thread_interrupt_level(interruptible_state);
 
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
@@ -2920,11 +3185,11 @@ vm_fault_wire(
        vm_map_t        map,
        vm_map_entry_t  entry,
        pmap_t          pmap,
-       vm_offset_t     pmap_addr)
+       vm_map_offset_t pmap_addr)
 {
 
-       register vm_offset_t    va;
-       register vm_offset_t    end_addr = entry->vme_end;
+       register vm_map_offset_t        va;
+       register vm_map_offset_t        end_addr = entry->vme_end;
        register kern_return_t  rc;
 
        assert(entry->in_transition);
@@ -2985,10 +3250,10 @@ vm_fault_unwire(
        vm_map_entry_t  entry,
        boolean_t       deallocate,
        pmap_t          pmap,
-       vm_offset_t     pmap_addr)
+       vm_map_offset_t pmap_addr)
 {
-       register vm_offset_t    va;
-       register vm_offset_t    end_addr = entry->vme_end;
+       register vm_map_offset_t        va;
+       register vm_map_offset_t        end_addr = entry->vme_end;
        vm_object_t             object;
 
        object = (entry->is_sub_map)
@@ -3047,8 +3312,7 @@ vm_fault_unwire(
                        result_object = result_page->object;
                        if (deallocate) {
                                assert(!result_page->fictitious);
-                               pmap_page_protect(result_page->phys_page,
-                                               VM_PROT_NONE);
+                               pmap_disconnect(result_page->phys_page);
                                VM_PAGE_FREE(result_page);
                        } else {
                                vm_page_lock_queues();
                                VM_PAGE_FREE(result_page);
                        } else {
                                vm_page_lock_queues();
@@ -3094,23 +3358,23 @@ vm_fault_unwire(
  */
 kern_return_t
 vm_fault_wire_fast(
-       vm_map_t        map,
-       vm_offset_t     va,
+       __unused vm_map_t       map,
+       vm_map_offset_t va,
        vm_map_entry_t  entry,
-       pmap_t          pmap,
-       vm_offset_t     pmap_addr)
+       pmap_t                  pmap,
+       vm_map_offset_t pmap_addr)
 {
        vm_object_t             object;
        vm_object_offset_t      offset;
        register vm_page_t      m;
        vm_prot_t               prot;
-       thread_act_t            thr_act;
+       thread_t                thread = current_thread();
        unsigned int            cache_attr;
 
        VM_STAT(faults++);
 
-       if((thr_act=current_act()) && (thr_act->task != TASK_NULL))
-         thr_act->task->faults++;
+       if (thread != THREAD_NULL && thread->task != TASK_NULL)
+         thread->task->faults++;
 
 /*
  *     Recovery actions
 
@@ -3127,8 +3391,8 @@ vm_fault_wire_fast(
 
 #undef UNLOCK_THINGS
 #define UNLOCK_THINGS  {                               \
 
-       object->paging_in_progress--;                   \
-       vm_object_unlock(object);                       \
+       vm_object_paging_end(object);                      \
+       vm_object_unlock(object);                          \
 }
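/*
 * Illustrative sketch, not from the xnu sources: the paired begin/end
 * accounting that the macro above now uses instead of decrementing the
 * counter field directly.  Stand-in names; the kernel helpers can also
 * interact with threads waiting on the object.
 */
struct sketch_vm_object { int paging_in_progress; };

static void sketch_paging_begin(struct sketch_vm_object *o) { o->paging_in_progress++; }
static void sketch_paging_end(struct sketch_vm_object *o)   { o->paging_in_progress--; }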
 
 #undef UNLOCK_AND_DEALLOCATE
@@ -3169,7 +3433,7 @@ vm_fault_wire_fast(
        assert(object->ref_count > 0);
        object->ref_count++;
        vm_object_res_reference(object);
-       object->paging_in_progress++;
+       vm_object_paging_begin(object);
 
        /*
         *      INVARIANTS (through entire routine):
 
@@ -3188,14 +3452,17 @@ vm_fault_wire_fast(
        /*
         *      Look for page in top-level object.  If it's not there or
         *      there's something going on, give up.
+        * ENCRYPTED SWAP: use the slow fault path, since we'll need to
+        * decrypt the page before wiring it down.
         */
        m = vm_page_lookup(object, offset);
-       if ((m == VM_PAGE_NULL) || (m->busy) || 
+       if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) ||
            (m->unusual && ( m->error || m->restart || m->absent ||
                                prot & m->page_lock))) {
 
                GIVE_UP;
        }
+       ASSERT_PAGE_DECRYPTED(m);
 
        /*
         *      Wire the page down now.  All bail outs beyond this
 
@@ -3227,7 +3494,7 @@ vm_fault_wire_fast(
         *      may cause other faults.   
         */
        if (m->no_isync == TRUE) {
-               pmap_sync_caches_phys(m->phys_page);
+               pmap_sync_page_data_phys(m->phys_page);
 
                m->no_isync = FALSE;
        }
 
@@ -3317,7 +3584,7 @@ kern_return_t
 vm_fault_copy(
        vm_object_t             src_object,
        vm_object_offset_t      src_offset,
-       vm_size_t               *src_size,              /* INOUT */
+       vm_map_size_t           *copy_size,             /* INOUT */
        vm_object_t             dst_object,
        vm_object_offset_t      dst_offset,
        vm_map_t                dst_map,
@@ -3334,28 +3601,28 @@ vm_fault_copy(
        vm_page_t               dst_top_page;
        vm_prot_t               dst_prot;
 
-       vm_size_t               amount_left;
+       vm_map_size_t           amount_left;
        vm_object_t             old_copy_object;
        kern_return_t           error = 0;
 
-       vm_size_t               part_size;
+       vm_map_size_t           part_size;
 
        /*
         * In order not to confuse the clustered pageins, align
         * the different offsets on a page boundary.
         */
 
-       vm_object_offset_t      src_lo_offset = trunc_page_64(src_offset);
-       vm_object_offset_t      dst_lo_offset = trunc_page_64(dst_offset);
-       vm_object_offset_t      src_hi_offset = round_page_64(src_offset + *src_size);
-       vm_object_offset_t      dst_hi_offset = round_page_64(dst_offset + *src_size);
+       vm_object_offset_t      src_lo_offset = vm_object_trunc_page(src_offset);
+       vm_object_offset_t      dst_lo_offset = vm_object_trunc_page(dst_offset);
+       vm_object_offset_t      src_hi_offset = vm_object_round_page(src_offset + *copy_size);
+       vm_object_offset_t      dst_hi_offset = vm_object_round_page(dst_offset + *copy_size);
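/*
 * Illustrative sketch, not from the xnu sources: the trunc/round pair above
 * with 4 KiB pages.  trunc rounds an offset down to its page start and round
 * rounds an end offset up, e.g. 0x1234 -> 0x1000 and 0x1234 -> 0x2000.
 * Stand-in macros:
 */
#define SKETCH_PAGE_SIZE	0x1000ULL
#define SKETCH_PAGE_MASK	(SKETCH_PAGE_SIZE - 1)
#define sketch_trunc_page(x)	((x) & ~SKETCH_PAGE_MASK)
#define sketch_round_page(x)	(((x) + SKETCH_PAGE_MASK) & ~SKETCH_PAGE_MASK)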
 
 #define        RETURN(x)                                       \
        MACRO_BEGIN                                     \
 
-       *src_size -= amount_left;                       \
+       *copy_size -= amount_left;                      \
        MACRO_RETURN(x);                                \
        MACRO_END
 
-       amount_left = *src_size;
+       amount_left = *copy_size;
        do { /* while (amount_left > 0) */
                /*
                 * There may be a deadlock if both source and destination
@@ -3373,7 +3640,7 @@ vm_fault_copy(
 
                XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
                switch (vm_fault_page(dst_object,
 
-                                     trunc_page_64(dst_offset),
+                                     vm_object_trunc_page(dst_offset),
                                      VM_PROT_WRITE|VM_PROT_READ,
                                      FALSE,
                                      interruptible,
@@ -3447,7 +3714,7 @@ vm_fault_copy(
                } else {
                        vm_object_lock(src_object);
                        src_page = vm_page_lookup(src_object,
-                                                 trunc_page_64(src_offset));
+                                                 vm_object_trunc_page(src_offset));
                        if (src_page == dst_page) {
                                src_prot = dst_prot;
                                result_page = VM_PAGE_NULL;
@@ -3459,7 +3726,7 @@ vm_fault_copy(
                                        "vm_fault_copy(2) -> vm_fault_page\n",
                                        0,0,0,0,0);
                                switch (vm_fault_page(src_object, 
-                                                     trunc_page_64(src_offset),
+                                                     vm_object_trunc_page(src_offset),
                                                      VM_PROT_READ, 
                                                      FALSE, 
                                                      interruptible,
@@ -3535,8 +3802,8 @@ vm_fault_copy(
                        vm_object_offset_t      src_po,
                                                dst_po;
 
-                       src_po = src_offset - trunc_page_64(src_offset);
-                       dst_po = dst_offset - trunc_page_64(dst_offset);
+                       src_po = src_offset - vm_object_trunc_page(src_offset);
+                       dst_po = dst_offset - vm_object_trunc_page(dst_offset);
 
                        if (dst_po > src_po) {
                                part_size = PAGE_SIZE - dst_po;
 