diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c
index f5275261d3d525fe1eb11b1c4ba819a9028d45ec..207a987ec03c23a2bbc5765637992ad1756a4986 100644
--- a/osfmk/vm/vm_fault.c
+++ b/osfmk/vm/vm_fault.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -64,7 +64,6 @@
 
 #include <mach_cluster_stats.h>
 #include <mach_pagemap.h>
-#include <mach_kdb.h>
 #include <libkern/OSAtomic.h>
 
 #include <mach/mach_types.h>
@@ -89,8 +88,8 @@
 #include <kern/zalloc.h>
 #include <kern/misc_protos.h>
 
-#include <ppc/proc_reg.h>
-
+#include <vm/vm_compressor.h>
+#include <vm/vm_compressor_pager.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_external.h>
 #include <vm/memory_object.h>
 #include <vm/vm_purgeable_internal.h>  /* Needed by some vm_page.h macros */
+#include <vm/vm_shared_region.h>
 
-#include <sys/kdebug.h>
+#include <sys/codesign.h>
 
-#define VM_FAULT_CLASSIFY      0
+#include <libsa/sys/timers.h>  /* for struct timespec */
 
-/* Zero-filled pages are marked "m->zero_fill" and put on the
- * special zero-fill inactive queue  only if they belong to
- * an object at least this big.
- */
-#define        VM_ZF_OBJECT_SIZE_THRESHOLD     (0x200000)
+#define VM_FAULT_CLASSIFY      0
 
 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
 
 int    vm_object_pagein_throttle = 16;
 
-extern int cs_debug;
+/*
+ * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control; it
+ * kicks in when swap space runs out.  64-bit programs have massive address spaces and can leak enormous amounts
+ * of memory if they're buggy and can run the system completely out of swap space.  If this happens, we
+ * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
+ * keep the UI active so that the user has a chance to kill the offending task before the system 
+ * completely hangs.
+ *
+ * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
+ * to tasks that appear to be bloated.  When swap runs out, any task using more than vm_hard_throttle_threshold
+ * will be throttled.  The throttling is done by delaying the thread that's trying to demand-zero a page by
+ * HARD_THROTTLE_DELAY microseconds before it is allowed to try the page fault again.
+ */
+
+extern void throttle_lowpri_io(int);
 
-#if    MACH_KDB
-extern struct db_watchpoint *db_watchpoint_list;
-#endif /* MACH_KDB */
+uint64_t vm_hard_throttle_threshold;
 
 
+
+#define NEED_TO_HARD_THROTTLE_THIS_TASK()      ((current_task() != kernel_task && \
+                                                 get_task_resident_size(current_task()) > (((AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE) / 5)) && \
+                                                (vm_low_on_space() || (vm_page_free_count < vm_page_throttle_limit && \
+                                                                       proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED )))
+
+
+
+#define HARD_THROTTLE_DELAY    20000   /* 20000 us == 20 ms */
+#define SOFT_THROTTLE_DELAY    2000    /* 2000 us == 2 ms */
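
A rough stand-alone rendering of the policy the comment and macro above describe; every struct field and helper here is a hypothetical stand-in for the kernel's task and VM state, not a real XNU interface:

#include <stdbool.h>
#include <stdint.h>

#define HARD_THROTTLE_DELAY_US 20000   /* 20 ms, as in the define above */

/* Hypothetical snapshot of the state the kernel macro consults. */
struct throttle_state {
    bool     is_kernel_task;
    uint64_t resident_bytes;        /* get_task_resident_size()                      */
    uint64_t non_compressed_bytes;  /* AVAILABLE_NON_COMPRESSED_MEMORY * PAGE_SIZE   */
    bool     low_on_space;          /* vm_low_on_space()                             */
    bool     free_below_limit;      /* vm_page_free_count < vm_page_throttle_limit   */
    bool     io_throttled;          /* thread I/O policy >= THROTTLE_LEVEL_THROTTLED */
};

/* Mirrors NEED_TO_HARD_THROTTLE_THIS_TASK(): a non-kernel task resident
 * beyond 1/5 of non-compressed memory is hard-throttled once the system
 * is low on space, or when free pages are scarce and the thread is
 * already I/O-throttled. */
static int throttle_delay_us(const struct throttle_state *s)
{
    bool bloated = !s->is_kernel_task &&
                   s->resident_bytes > s->non_compressed_bytes / 5;

    if (bloated && (s->low_on_space ||
                    (s->free_below_limit && s->io_throttled)))
        return HARD_THROTTLE_DELAY_US;
    return 0;
}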
+
+boolean_t current_thread_aborted(void);
+
 /* Forward declarations of internal routines. */
 extern kern_return_t vm_fault_wire_fast(
                                vm_map_t        map,
@@ -149,11 +170,14 @@ extern void vm_fault_classify(vm_object_t object,
 extern void vm_fault_classify_init(void);
 #endif
 
+unsigned long vm_pmap_enter_blocked = 0;
+unsigned long vm_pmap_enter_retried = 0;
 
 unsigned long vm_cs_validates = 0;
 unsigned long vm_cs_revalidates = 0;
 unsigned long vm_cs_query_modified = 0;
 unsigned long vm_cs_validated_dirtied = 0;
+unsigned long vm_cs_bitmap_validated = 0;
 
 /*
  *     Routine:        vm_fault_init
@@ -163,6 +187,41 @@ unsigned long vm_cs_validated_dirtied = 0;
 void
 vm_fault_init(void)
 {
+       int i, vm_compressor_temp;
+       boolean_t need_default_val = TRUE;
+       /*
+        * Choose a value for the hard throttle threshold based on the amount of ram.  The threshold is
+        * computed as a percentage of available memory, and the percentage used is scaled inversely with
+        * the amount of memory.  The percentage runs between 10% and 35%.  We use 35% for small memory systems
+        * and reduce the value down to 10% for very large memory configurations.  This helps give us a
+        * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
+        * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
+        */
+
+       vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100;
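
Working the formula above through a few configurations (informal check; assumes sane_size is usable RAM in bytes, as the division by 1 GB implies — the percentage is 35 minus the GB count, floored at 10% for machines with 25 GB or more):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the line above: pct = 35 - min(GB, 25). */
static uint64_t hard_throttle_threshold(uint64_t sane_size)
{
    int gb  = (int)(sane_size / (1024ULL * 1024 * 1024));
    int pct = 35 - (gb < 25 ? gb : 25);
    return sane_size * (uint64_t)pct / 100;
}

int main(void)
{
    /* 1 GB -> 34% (~348 MB), 8 GB -> 27% (~2.2 GB), 32 GB -> 10% (3.2 GB) */
    for (uint64_t gb = 1; gb <= 32; gb *= 2)
        printf("%3llu GB -> %llu MB\n", (unsigned long long)gb,
               (unsigned long long)(hard_throttle_threshold(gb << 30) >> 20));
    return 0;
}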
+
+       /*
+        * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
+        */
+
+       if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) {
+               for ( i = 0; i < VM_PAGER_MAX_MODES; i++) {
+                       if (vm_compressor_temp > 0 && 
+                           ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) {
+                               need_default_val = FALSE;
+                               vm_compressor_mode = vm_compressor_temp;
+                               break;
+                       }
+               }
+               if (need_default_val)
+                       printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
+       } 
+       if (need_default_val) {
+               /* If no boot arg or incorrect boot arg, try device tree. */
+               PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
+       }
+       PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count));
+       printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
 }
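
The boot-arg loop above accepts "vm_compressor" only when (vm_compressor_temp & (1 << i)) == vm_compressor_temp holds for some i, i.e. when exactly one mode bit below VM_PAGER_MAX_MODES is set. A stand-alone sketch of the same test (the bound of 8 is illustrative, not the kernel's actual VM_PAGER_MAX_MODES):

#include <stdbool.h>

#define MAX_MODES 8   /* illustrative stand-in for VM_PAGER_MAX_MODES */

static bool is_valid_compressor_mode(int v)
{
    for (int i = 0; i < MAX_MODES; i++)
        if (v > 0 && (v & (1 << i)) == v)
            return true;     /* exactly one bit set: a single mode */
    return false;
}

/* is_valid_compressor_mode(4) is true; is_valid_compressor_mode(6) is
 * false, so a combined value like 6 is ignored and the default (or the
 * device-tree entry) is used instead, as in the code above. */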
 
 /*
@@ -185,7 +244,7 @@ vm_fault_cleanup(
        register vm_page_t      top_page)
 {
        vm_object_paging_end(object);
-       vm_object_unlock(object);
+       vm_object_unlock(object);
 
        if (top_page != VM_PAGE_NULL) {
                object = top_page->object;
@@ -222,8 +281,11 @@ boolean_t  vm_page_deactivate_behind = TRUE;
 /* 
  * default sizes given VM_BEHAVIOR_DEFAULT reference behavior 
  */
-int vm_default_ahead = 0;
-int vm_default_behind = MAX_UPL_TRANSFER;
+#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW    128
+#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER   16              /* don't make this too big... */
+                                                                /* we use it to size an array on the stack */
+
+int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
 
 #define MAX_SEQUENTIAL_RUN     (1024 * 1024 * 1024)
 
@@ -340,6 +402,8 @@ vm_fault_is_sequential(
 }
 
 
+int vm_page_deactivate_behind_count = 0;
+
 /*
  * vm_page_deactivate_behind
  *
@@ -359,10 +423,17 @@ vm_fault_deactivate_behind(
        vm_object_offset_t      offset,
        vm_behavior_t           behavior)
 {
-       vm_page_t       m = NULL;
+       int             n;
+       int             pages_in_run = 0;
+       int             max_pages_in_run = 0;
        int             sequential_run;
        int             sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+       vm_object_offset_t      run_offset = 0;
+       vm_object_offset_t      pg_offset = 0;
+       vm_page_t       m;
+       vm_page_t       page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
 
+       pages_in_run = 0;
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
 #endif
@@ -387,12 +458,16 @@ vm_fault_deactivate_behind(
        case VM_BEHAVIOR_RANDOM:
                break;
        case VM_BEHAVIOR_SEQUENTIAL:
-               if (sequential_run >= (int)PAGE_SIZE)
-                       m = vm_page_lookup(object, offset - PAGE_SIZE_64);
+               if (sequential_run >= (int)PAGE_SIZE) {
+                       run_offset = 0 - PAGE_SIZE_64;
+                       max_pages_in_run = 1;
+               }
                break;
        case VM_BEHAVIOR_RSEQNTL:
-               if (sequential_run >= (int)PAGE_SIZE)
-                       m = vm_page_lookup(object, offset + PAGE_SIZE_64);
+               if (sequential_run >= (int)PAGE_SIZE) {
+                       run_offset = PAGE_SIZE_64;
+                       max_pages_in_run = 1;
+               }
                break;
        case VM_BEHAVIOR_DEFAULT:
        default:
@@ -403,32 +478,124 @@ vm_fault_deactivate_behind(
                 * long enough on an object with default access behavior
                 * to consider it for deactivation
                 */
-               if ((uint64_t)sequential_run >= behind) {
+               if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
+                       /*
+                        * the comparisons between offset and behind are done
+                        * in this kind of odd fashion in order to prevent wrap around
+                        * at the end points
+                        */
                        if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
-                               if (offset >= behind)
-                                       m = vm_page_lookup(object, offset - behind);
+                               if (offset >= behind) {
+                                       run_offset = 0 - behind;
+                                       pg_offset = PAGE_SIZE_64;
+                                       max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
+                               }
                        } else {
-                               if (offset < -behind)
-                                       m = vm_page_lookup(object, offset + behind);
+                               if (offset < -behind) {
+                                       run_offset = behind;
+                                       pg_offset = 0 - PAGE_SIZE_64;
+                                       max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
+                               }
                        }
                }
                break;
        }
        }
-       if (m) {
-               if (!m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
-                       pmap_clear_reference(m->phys_page);
-                       m->deactivated = TRUE;
+        for (n = 0; n < max_pages_in_run; n++) {
+               m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
+
+               if (m && !m->laundry && !m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
+                       page_run[pages_in_run++] = m;
+
+                       /*
+                        * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+                        *
+                        * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+                        * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+                        * new reference happens. If no further references happen on the page after that remote TLB flushes,
+                        * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+                        * by pageout_scan, which is just fine since the last reference would have happened quite far
+                        * in the past (TLB caches don't hang around for very long), and of course could just as easily
+                        * have happened before we did the deactivate_behind.
+                        */
+                       pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+               }
+       }
+       if (pages_in_run) {
+               vm_page_lockspin_queues();
+
+               for (n = 0; n < pages_in_run; n++) {
+
+                       m = page_run[n];
+
+                       vm_page_deactivate_internal(m, FALSE);
+
+                       vm_page_deactivate_behind_count++;
 #if TRACEFAULTPAGE
                        dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
 #endif
-                       return TRUE;
                }
+               vm_page_unlock_queues();
+
+               return TRUE;
        }
        return FALSE;
 }
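
The rewritten function batches its queue-lock work: candidate pages are gathered with only the object locked, then vm_page_lockspin_queues() is taken once for the whole run rather than once per page. A toy, self-contained sketch of that two-phase shape (the page table, eligibility test, and lock placement are stand-ins):

#include <stdbool.h>
#include <stdint.h>

#define CLUSTER 16   /* mirrors VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER */

/* Toy page table standing in for vm_page_lookup() on one object. */
typedef struct { bool present; bool busy; bool deactivated; } page_t;
static page_t pages[256];

static page_t *page_lookup(int64_t idx)
{
    return (idx >= 0 && idx < 256 && pages[idx].present) ? &pages[idx] : NULL;
}

static bool deactivate_run(int64_t base, int64_t stride, int max_pages)
{
    page_t *run[CLUSTER];
    int count = 0;

    /* phase 1: collect eligible pages; no queue lock taken yet */
    for (int n = 0; n < max_pages && n < CLUSTER; n++) {
        page_t *m = page_lookup(base + n * stride);
        if (m && !m->busy)
            run[count++] = m;
    }
    if (count == 0)
        return false;

    /* phase 2: one vm_page_lockspin_queues()-style round trip per batch */
    for (int n = 0; n < count; n++)
        run[n]->deactivated = true;

    return true;
}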
 
 
+static int
+vm_page_throttled(void)
+{
+        clock_sec_t     elapsed_sec;
+        clock_sec_t     tv_sec;
+        clock_usec_t    tv_usec;
+       
+       thread_t thread = current_thread();
+       
+       if (thread->options & TH_OPT_VMPRIV)
+               return (0);
+
+       thread->t_page_creation_count++;
+
+       if (NEED_TO_HARD_THROTTLE_THIS_TASK())
+               return (HARD_THROTTLE_DELAY);
+
+       if ((vm_page_free_count < vm_page_throttle_limit || ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
+           thread->t_page_creation_count > vm_page_creation_throttle) {
+               
+               clock_get_system_microtime(&tv_sec, &tv_usec);
+
+               elapsed_sec = tv_sec - thread->t_page_creation_time;
+
+               if (elapsed_sec <= 6 || (thread->t_page_creation_count / elapsed_sec) >= (vm_page_creation_throttle / 6)) {
+
+                       if (elapsed_sec >= 60) {
+                               /*
+                                * we'll reset our stats to give a well behaved app
+                                * that was unlucky enough to accumulate a bunch of pages
+                                * over a long period of time a chance to get out of
+                                * the throttled state... we reset the counter and timestamp
+                                * so that if it stays under the rate limit for the next second
+                                * it will be back in our good graces... if it exceeds it, it 
+                                * will remain in the throttled state
+                                */
+                               thread->t_page_creation_time = tv_sec;
+                               thread->t_page_creation_count = (vm_page_creation_throttle / 6) * 5;
+                       }
+                       ++vm_page_throttle_count;
+
+                       if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && HARD_THROTTLE_LIMIT_REACHED())
+                               return (HARD_THROTTLE_DELAY);
+                       else
+                               return (SOFT_THROTTLE_DELAY);
+               }
+               thread->t_page_creation_time = tv_sec;
+               thread->t_page_creation_count = 0;
+       }
+       return (0);
+}
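
The inner test above reads as a rate limit: a thread stays throttled while its page-creation rate holds at or above vm_page_creation_throttle/6 pages per second, and after 60 seconds its counters are re-seeded so a thread that slows down can work its way back out. A hedged miniature of that shape (the constant and the stats plumbing are illustrative):

#include <stdint.h>

#define CREATION_LIMIT 3000   /* illustrative vm_page_creation_throttle */

struct thread_stats {
    uint64_t created;     /* t_page_creation_count */
    uint64_t since_sec;   /* t_page_creation_time  */
};

/* Returns nonzero when the thread should be delayed. */
static int should_throttle(struct thread_stats *t, uint64_t now_sec)
{
    uint64_t elapsed = now_sec - t->since_sec;

    if (elapsed <= 6 || t->created / elapsed >= CREATION_LIMIT / 6) {
        if (elapsed >= 60) {
            /* re-seed, as in the comment above: counters stay near the
             * limit so a still-misbehaving thread is re-throttled fast */
            t->since_sec = now_sec;
            t->created   = (CREATION_LIMIT / 6) * 5;
        }
        return 1;
    }
    t->since_sec = now_sec;   /* rate fell below the limit: reset */
    t->created   = 0;
    return 0;
}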
+
+
 /*
  * check for various conditions that would
  * prevent us from creating a ZF page...
@@ -440,10 +607,16 @@ vm_fault_deactivate_behind(
 static vm_fault_return_t
 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state)
 {
-        if (object->shadow_severed) {
+       int throttle_delay;
+
+        if (object->shadow_severed ||
+           VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
                /*
-                * the shadow chain was severed
-                * just have to return an error at this point
+                * Either:
+                * 1. the shadow chain was severed,
+                * 2. the purgeable object is volatile or empty and is marked
+                *    to fault on access while volatile.
+                * Just have to return an error at this point
                 */
                if (m != VM_PAGE_NULL)
                        VM_PAGE_FREE(m);
@@ -473,7 +646,7 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t int
                        return (VM_FAULT_RETRY);
                }
        }
-       if (VM_PAGE_ZFILL_THROTTLED()) {
+       if ((throttle_delay = vm_page_throttled())) {
                /*
                 * we're throttling zero-fills...
                 * treat this as if we couldn't grab a page
@@ -482,6 +655,14 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t int
                        VM_PAGE_FREE(m);
                vm_fault_cleanup(object, first_m);
 
+               VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+
+               delay(throttle_delay);
+
+               if (current_thread_aborted()) {
+                       thread_interrupt_level(interruptible_state);
+                       return VM_FAULT_INTERRUPTED;
+               }
                thread_interrupt_level(interruptible_state);
 
                return (VM_FAULT_MEMORY_SHORTAGE);
@@ -522,9 +703,9 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
        m->cs_validated = FALSE;
        m->cs_tainted = FALSE;
 
-       if (no_zero_fill == TRUE)
-               my_fault = DBG_NZF_PAGE_FAULT;
-       else {
+       if (no_zero_fill == TRUE) {
+               my_fault = DBG_NZF_PAGE_FAULT;
+       } else {
                vm_page_zero_fill(m);
 
                VM_STAT_INCR(zero_fill_count);
@@ -534,22 +715,29 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
        assert(m->object != kernel_object);
        //assert(m->pageq.next == NULL && m->pageq.prev == NULL);
 
-       if (!IP_VALID(memory_manager_default) &&
+       if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
                (m->object->purgable == VM_PURGABLE_DENY ||
                 m->object->purgable == VM_PURGABLE_NONVOLATILE ||
                 m->object->purgable == VM_PURGABLE_VOLATILE )) {
-               vm_page_lock_queues();
 
-                queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
-                m->throttled = TRUE;
-                vm_page_throttled_count++;
+               vm_page_lockspin_queues();
+
+               if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
+                       assert(!VM_PAGE_WIRED(m));
 
-               vm_page_unlock_queues();
-       } else {
-               if (m->object->size > VM_ZF_OBJECT_SIZE_THRESHOLD) {
-                       m->zero_fill = TRUE;
-                       OSAddAtomic(1, (SInt32 *)&vm_zf_count);
+                       /*
+                        * can't be on the pageout queue since we don't
+                        * have a pager to try and clean to
+                        */
+                       assert(!m->pageout_queue);
+
+                       VM_PAGE_QUEUES_REMOVE(m);
+
+                       queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+                       m->throttled = TRUE;
+                       vm_page_throttled_count++;
                }
+               vm_page_unlock_queues();
        }
        return (my_fault);
 }
@@ -596,7 +784,15 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
  *             be destroyed when this guarantee is no longer required.
  *             The "result_page" is also left busy.  It is not removed
  *             from the pageout queues.
+ *     Special Case:
+ *             A return value of VM_FAULT_SUCCESS_NO_PAGE means that the 
+ *             fault succeeded but there's no VM page (i.e. the VM object
+ *             does not actually hold VM pages, but device memory or
+ *             large pages).  The object is still locked and we still hold a
+ *             paging_in_progress reference.
  */
+unsigned int vm_fault_page_blocked_access = 0;
+unsigned int vm_fault_page_forced_retry = 0;
 
 vm_fault_return_t
 vm_fault_page(
@@ -605,10 +801,11 @@ vm_fault_page(
        vm_object_offset_t first_offset,        /* Offset into object */
        vm_prot_t       fault_type,     /* What access is requested */
        boolean_t       must_be_resident,/* Must page be resident? */
+       boolean_t       caller_lookup,  /* caller looked up page */
        /* Modifies in place: */
        vm_prot_t       *protection,    /* Protection for mapping */
-       /* Returns: */
        vm_page_t       *result_page,   /* Page found, if successful */
+       /* Returns: */
        vm_page_t       *top_page,      /* Page in top object, if
                                         * not result_page.  */
        int             *type_of_fault, /* if non-null, fill in with type of fault
@@ -616,13 +813,9 @@ vm_fault_page(
        /* More arguments: */
        kern_return_t   *error_code,    /* code if page is in error */
        boolean_t       no_zero_fill,   /* don't zero fill absent pages */
-#if MACH_PAGEMAP
        boolean_t       data_supply,    /* treat as data_supply if 
                                         * it is a write fault and a full
                                         * page is provided */
-#else
-       __unused boolean_t data_supply,
-#endif
        vm_object_fault_info_t fault_info)
 {
        vm_page_t               m;
@@ -632,17 +825,23 @@ vm_fault_page(
        vm_object_t             next_object;
        vm_object_t             copy_object;
        boolean_t               look_for_page;
+       boolean_t               force_fault_retry = FALSE;
        vm_prot_t               access_required = fault_type;
        vm_prot_t               wants_copy_flag;
        CLUSTER_STAT(int pages_at_higher_offsets;)
        CLUSTER_STAT(int pages_at_lower_offsets;)
        kern_return_t           wait_result;
        boolean_t               interruptible_state;
+       boolean_t               data_already_requested = FALSE;
+       vm_behavior_t           orig_behavior;
+       vm_size_t               orig_cluster_size;
        vm_fault_return_t       error;
        int                     my_fault;
        uint32_t                try_failed_count;
        int                     interruptible; /* how may fault be interrupted? */
+       int                     external_state = VM_EXTERNAL_STATE_UNKNOWN;
        memory_object_t         pager;
+       vm_fault_return_t       retval;
 
 /*
  * MACH page map - an optional optimization where a bit map is maintained
@@ -671,60 +870,45 @@ vm_fault_page(
  * into a copy object in order to avoid a redundant page out operation.
  */
 #if MACH_PAGEMAP
-#define MUST_ASK_PAGER(o, f) (vm_external_state_get((o)->existence_map, (f)) \
-                       != VM_EXTERNAL_STATE_ABSENT)
-#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
-                       == VM_EXTERNAL_STATE_EXISTS)
-#else
-#define MUST_ASK_PAGER(o, f) (TRUE)
-#define PAGED_OUT(o, f) (FALSE)
-#endif
+#define MUST_ASK_PAGER(o, f, s)                                        \
+       ((vm_external_state_get((o)->existence_map, (f))        \
+         != VM_EXTERNAL_STATE_ABSENT) &&                       \
+        (s = (VM_COMPRESSOR_PAGER_STATE_GET((o), (f))))        \
+        != VM_EXTERNAL_STATE_ABSENT)
+#define PAGED_OUT(o, f)                                                \
+       ((vm_external_state_get((o)->existence_map, (f))        \
+         == VM_EXTERNAL_STATE_EXISTS) ||                       \
+        (VM_COMPRESSOR_PAGER_STATE_GET((o), (f))               \
+         == VM_EXTERNAL_STATE_EXISTS))
+#else /* MACH_PAGEMAP */
+#define MUST_ASK_PAGER(o, f, s)                                        \
+       ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
+#define PAGED_OUT(o, f) \
+       (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
+#endif /* MACH_PAGEMAP */
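
Note the assignment side effect in both variants: MUST_ASK_PAGER stores the compressor pager's state into its third argument, which the caller reads back (as external_state in the look_for_page computation further down). A compilable miniature of that shape, with toy states standing in for the VM_EXTERNAL_STATE_* values:

#include <stdio.h>

enum { STATE_UNKNOWN, STATE_ABSENT, STATE_EXISTS };

/* Trivial stand-in for VM_COMPRESSOR_PAGER_STATE_GET(). */
static int compressor_state(long offset) { return offset ? STATE_EXISTS : STATE_ABSENT; }

/* Same shape as the !MACH_PAGEMAP variant above: evaluates the lookup
 * once and caches it in `s` as a side effect. */
#define MUST_ASK_PAGER_SKETCH(f, s) \
    ((s = compressor_state(f)) != STATE_ABSENT)

int main(void)
{
    int external_state = STATE_UNKNOWN;
    if (MUST_ASK_PAGER_SKETCH(4096L, external_state))
        printf("ask the pager; cached state = %d\n", external_state);
    return 0;
}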
 
 /*
  *     Recovery actions
  */
-#define PREPARE_RELEASE_PAGE(m)                                \
-       MACRO_BEGIN                                     \
-       vm_page_lock_queues();                          \
-       MACRO_END
-
-#define DO_RELEASE_PAGE(m)                             \
-       MACRO_BEGIN                                     \
-       PAGE_WAKEUP_DONE(m);                            \
-       if (!m->active && !m->inactive && !m->throttled)\
-               vm_page_activate(m);                    \
-       vm_page_unlock_queues();                        \
-       MACRO_END
-
 #define RELEASE_PAGE(m)                                        \
        MACRO_BEGIN                                     \
-       PREPARE_RELEASE_PAGE(m);                        \
-       DO_RELEASE_PAGE(m);                             \
+       PAGE_WAKEUP_DONE(m);                            \
+       if (!m->active && !m->inactive && !m->throttled) {              \
+               vm_page_lockspin_queues();                              \
+               if (!m->active && !m->inactive && !m->throttled) {      \
+                       if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)   \
+                                vm_page_deactivate(m);                  \
+                        else                                           \
+                               vm_page_activate(m);                    \
+               }                                                       \
+               vm_page_unlock_queues();                                \
+       }                                                               \
        MACRO_END
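
The new RELEASE_PAGE follows a check/lock/re-check shape: the unlocked test skips the page-queue spinlock entirely for pages already on a queue, and the re-test under the lock covers a racing thread queueing the page in between. A user-space sketch of the same pattern (a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct { bool active, inactive, throttled; } qpage_t;

static bool on_a_queue(qpage_t *m)
{
    return m->active || m->inactive || m->throttled;
}

static void release_page_sketch(qpage_t *m)
{
    if (!on_a_queue(m)) {                 /* cheap unlocked test */
        pthread_mutex_lock(&queue_lock);
        if (!on_a_queue(m))               /* still unqueued under the lock */
            m->inactive = true;           /* deactivate/activate as above */
        pthread_mutex_unlock(&queue_lock);
    }
}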
 
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
 #endif
 
-
-#if    MACH_KDB
-               /*
-                *      If there are watchpoints set, then
-                *      we don't want to give away write permission
-                *      on a read fault.  Make the task write fault,
-                *      so that the watchpoint code notices the access.
-                */
-           if (db_watchpoint_list) {
-               /*
-                *      If we aren't asking for write permission,
-                *      then don't give it away.  We're using write
-                *      faults to set the dirty bit.
-                */
-               if (!(fault_type & VM_PROT_WRITE))
-                       *protection &= ~VM_PROT_WRITE;
-       }
-#endif /* MACH_KDB */
-
        interruptible = fault_info->interruptible;
        interruptible_state = thread_interrupt_level(interruptible);
  
@@ -762,7 +946,7 @@ vm_fault_page(
 
        XPR(XPR_VM_FAULT,
                "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
-               (integer_t)object, offset, fault_type, *protection, 0);
+               object, offset, fault_type, *protection, 0);
 
        /*
         * default type of fault
@@ -784,10 +968,53 @@ vm_fault_page(
                        return (VM_FAULT_MEMORY_ERROR);
                }
 
+               if (!object->pager_created && object->phys_contiguous) {
+                       /*
+                        * A physically-contiguous object without a pager:
+                        * must be a "large page" object.  We do not deal
+                        * with VM pages for this object.
+                        */
+                       caller_lookup = FALSE;
+                       m = VM_PAGE_NULL;
+                       goto phys_contig_object;
+               }
+
+               if (object->blocked_access) {
+                       /*
+                        * Access to this VM object has been blocked.
+                        * Replace our "paging_in_progress" reference with
+                        * a "activity_in_progress" reference and wait for
+                        * access to be unblocked.
+                        */
+                       caller_lookup = FALSE; /* no longer valid after sleep */
+                       vm_object_activity_begin(object);
+                       vm_object_paging_end(object);
+                       while (object->blocked_access) {
+                               vm_object_sleep(object,
+                                               VM_OBJECT_EVENT_UNBLOCKED,
+                                               THREAD_UNINT);
+                       }
+                       vm_fault_page_blocked_access++;
+                       vm_object_paging_begin(object);
+                       vm_object_activity_end(object);
+               }
+
                /*
                 * See whether the page at 'offset' is resident
                 */
-               m = vm_page_lookup(object, offset);
+               if (caller_lookup == TRUE) {
+                       /*
+                        * The caller has already looked up the page
+                        * and gave us the result in "result_page".
+                        * We can use this for the first lookup but
+                        * it loses its validity as soon as we unlock
+                        * the object.
+                        */
+                       m = *result_page;
+                       caller_lookup = FALSE; /* no longer valid after that */
+               } else {
+                       m = vm_page_lookup(object, offset);
+               }
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
 #endif
@@ -797,20 +1024,16 @@ vm_fault_page(
                                /*
                                 * The page is being brought in,
                                 * wait for it and then retry.
-                                *
-                                * A possible optimization: if the page
-                                * is known to be resident, we can ignore
-                                * pages that are absent (regardless of
-                                * whether they're busy).
                                 */
 #if TRACEFAULTPAGE
                                dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
 #endif
                                wait_result = PAGE_SLEEP(object, m, interruptible);
+
                                XPR(XPR_VM_FAULT,
                                    "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
-                                       (integer_t)object, offset,
-                                       (integer_t)m, 0, 0);
+                                   object, offset,
+                                   m, 0, 0);
                                counter(c_vm_fault_page_block_busy_kernel++);
 
                                if (wait_result != THREAD_AWAKENED) {
@@ -818,13 +1041,18 @@ vm_fault_page(
                                        thread_interrupt_level(interruptible_state);
 
                                        if (wait_result == THREAD_RESTART)
-                                               return (VM_FAULT_RETRY);
+                                               return (VM_FAULT_RETRY);
                                        else
                                                return (VM_FAULT_INTERRUPTED);
                                }
                                continue;
                        }
+                       if (m->laundry) {
+                               m->pageout = FALSE;
 
+                               if (!m->cleaning) 
+                                       vm_pageout_steal_laundry(m, FALSE);
+                       }
                        if (m->phys_page == vm_page_guard_addr) {
                                /*
                                 * Guard page: off limits !
@@ -842,6 +1070,7 @@ vm_fault_page(
                                        *top_page = first_m;
                                        if (type_of_fault)
                                                *type_of_fault = DBG_GUARD_FAULT;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_SUCCESS;
                                } else {
                                        /*
@@ -924,9 +1153,9 @@ vm_fault_page(
 
                                        XPR(XPR_VM_FAULT,
                                            "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
-                                               (integer_t)object, offset,
-                                               (integer_t)m,
-                                               (integer_t)first_object, 0);
+                                               object, offset,
+                                               m,
+                                               first_object, 0);
 
                                        if (object != first_object) {
                                                /*
@@ -966,6 +1195,9 @@ vm_fault_page(
                                         */
                                        my_fault = vm_fault_zero_page(m, no_zero_fill);
 
+                                       if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
+                                               m->absent = TRUE;
+
                                        break;
                                } else {
                                        if (must_be_resident)
@@ -979,18 +1211,21 @@ vm_fault_page(
                                                m->busy = TRUE;
 
                                                vm_page_lockspin_queues();
+
+                                               assert(!m->pageout_queue);
                                                VM_PAGE_QUEUES_REMOVE(m);
+
                                                vm_page_unlock_queues();
                                        }
                                        XPR(XPR_VM_FAULT,
                                            "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
-                                               (integer_t)object, offset,
-                                               (integer_t)next_object,
-                                               offset+object->shadow_offset,0);
+                                               object, offset,
+                                               next_object,
+                                               offset+object->vo_shadow_offset,0);
 
-                                       offset += object->shadow_offset;
-                                       fault_info->lo_offset += object->shadow_offset;
-                                       fault_info->hi_offset += object->shadow_offset;
+                                       offset += object->vo_shadow_offset;
+                                       fault_info->lo_offset += object->vo_shadow_offset;
+                                       fault_info->hi_offset += object->vo_shadow_offset;
                                        access_required = VM_PROT_READ;
 
                                        vm_object_lock(next_object);
@@ -1024,8 +1259,8 @@ vm_fault_page(
 #endif
                                XPR(XPR_VM_FAULT,
                                    "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
-                                       (integer_t)object, offset,
-                                       (integer_t)m, 0, 0);
+                                       object, offset,
+                                       m, 0, 0);
                                /*
                                 * take an extra ref so that object won't die
                                 */
@@ -1056,7 +1291,8 @@ vm_fault_page(
                                        return (VM_FAULT_RETRY);
                                }
                        }
-                       if (type_of_fault == NULL && m->speculative) {
+                       if (type_of_fault == NULL && m->speculative &&
+                           !(fault_info != NULL && fault_info->stealth)) {
                                /*
                                 * If we were passed a non-NULL pointer for
                                 * "type_of_fault", than we came from
@@ -1067,9 +1303,14 @@ vm_fault_page(
                                 * take it off the speculative queue, we'll
                                 * let the caller of vm_fault_page deal
                                 * with getting it onto the correct queue
+                                *
+                                * If the caller specified in fault_info that
+                                * it wants a "stealth" fault, we also leave
+                                * the page in the speculative queue.
                                 */
                                vm_page_lockspin_queues();
-                               VM_PAGE_QUEUES_REMOVE(m);
+                               if (m->speculative)
+                                       VM_PAGE_QUEUES_REMOVE(m);
                                vm_page_unlock_queues();
                        }
 
@@ -1121,7 +1362,7 @@ vm_fault_page(
 #endif
                        XPR(XPR_VM_FAULT,
                            "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
-                               (integer_t)object, offset, (integer_t)m, 0, 0);
+                               object, offset, m, 0, 0);
                        assert(!m->busy);
                        assert(!m->absent);
 
@@ -1137,14 +1378,37 @@ vm_fault_page(
                 * this object can provide the data or we're the top object...
                 * object is locked;  m == NULL
                 */
-               look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset) == TRUE) && !data_supply);
+               if (must_be_resident) {
+                       if (fault_type == VM_PROT_NONE &&
+                           object == kernel_object) {
+                               /*
+                                * We've been called from vm_fault_unwire()
+                                * while removing a map entry that was allocated
+                                * with KMA_KOBJECT and KMA_VAONLY.  This page
+                                * is not present and there's nothing more to
+                                * do here (nothing to unwire).
+                                */
+                               vm_fault_cleanup(object, first_m);
+                               thread_interrupt_level(interruptible_state);
+
+                               return VM_FAULT_MEMORY_ERROR;
+                       }
+
+                       goto dont_look_for_page;
+               }
+
+#if !MACH_PAGEMAP
+               data_supply = FALSE;
+#endif /* !MACH_PAGEMAP */
+
+               look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply);
                
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);      /* (TEST/DEBUG) */
 #endif
-               if ((look_for_page || (object == first_object)) && !must_be_resident && !object->phys_contiguous) {
+               if (!look_for_page && object == first_object && !object->phys_contiguous) {
                        /*
-                        * Allocate a new page for this object/offset pair
+                        * Allocate a new page for this object/offset pair as a placeholder
                         */
                        m = vm_page_grab();
 #if TRACEFAULTPAGE
@@ -1157,10 +1421,16 @@ vm_fault_page(
 
                                return (VM_FAULT_MEMORY_SHORTAGE);
                        }
-                       vm_page_insert(m, object, offset);
+
+                       if (fault_info && fault_info->batch_pmap_op == TRUE) {
+                               vm_page_insert_internal(m, object, offset, FALSE, TRUE, TRUE);
+                       } else {
+                               vm_page_insert(m, object, offset);
+                       }
                }
-               if (look_for_page && !must_be_resident) {
+               if (look_for_page) {
                        kern_return_t   rc;
+                       int             my_fault_type;
 
                        /*
                         *      If the memory manager is not ready, we
@@ -1175,7 +1445,7 @@ vm_fault_page(
 
                                XPR(XPR_VM_FAULT,
                                "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
-                                       (integer_t)object, offset, 0, 0, 0);
+                                       object, offset, 0, 0, 0);
 
                                /*
                                 * take an extra ref so object won't die
@@ -1227,8 +1497,8 @@ vm_fault_page(
                                vm_object_lock(object);
                                assert(object->ref_count > 0);
 
-                               if (object->paging_in_progress > vm_object_pagein_throttle) {
-                                       vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS, interruptible);
+                               if (object->paging_in_progress >= vm_object_pagein_throttle) {
+                                       vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);
 
                                        vm_object_unlock(object);
                                        wait_result = thread_block(THREAD_CONTINUE_NULL);
@@ -1243,13 +1513,81 @@ vm_fault_page(
                                        return (VM_FAULT_RETRY);
                                }
                        }
-                       if (m != VM_PAGE_NULL) {
-                               /*
-                                * Indicate that the page is waiting for data
-                                * from the memory manager.
-                                */
-                               m->list_req_pending = TRUE;
+                       if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && object->internal) {
+
+                               if (m == VM_PAGE_NULL) {
+                                       /*
+                                        * Allocate a new page for this object/offset pair as a placeholder
+                                        */
+                                       m = vm_page_grab();
+#if TRACEFAULTPAGE
+                                       dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
+#endif
+                                       if (m == VM_PAGE_NULL) {
+
+                                               vm_fault_cleanup(object, first_m);
+                                               thread_interrupt_level(interruptible_state);
+
+                                               return (VM_FAULT_MEMORY_SHORTAGE);
+                                       }
+
+                                       m->absent = TRUE;
+                                       if (fault_info && fault_info->batch_pmap_op == TRUE) {
+                                               vm_page_insert_internal(m, object, offset, FALSE, TRUE, TRUE);
+                                       } else {
+                                               vm_page_insert(m, object, offset);
+                                       }
+                               }
+                               assert(m->busy);
+                                       
                                m->absent = TRUE;
+                               pager = object->pager;
+
+                               vm_object_unlock(object);
+
+                               rc = vm_compressor_pager_get(pager, offset + object->paging_offset, m->phys_page, &my_fault_type, 0);
+
+                               vm_object_lock(object);
+
+                               switch (rc) {
+                               case KERN_SUCCESS:
+                                       m->absent = FALSE;
+                                       m->dirty = TRUE;
+                                       if ((m->object->wimg_bits &
+                                            VM_WIMG_MASK) !=
+                                           VM_WIMG_USE_DEFAULT) {
+                                               /*
+                                                * If the page is not cacheable,
+                                                * we can't let its contents
+                                                * linger in the data cache
+                                                * after the decompression.
+                                                */
+                                               pmap_sync_page_attributes_phys(
+                                                       m->phys_page);
+                                       } else
+                                               m->written_by_kernel = TRUE;
+                                       break;
+                               case KERN_MEMORY_FAILURE:
+                                       m->unusual = TRUE;
+                                       m->error = TRUE;
+                                       m->absent = FALSE;
+                                       break;
+                               case KERN_MEMORY_ERROR:
+                                       assert(m->absent);
+                                       break;
+                               default:
+                                       panic("?");
+                               }
+                               PAGE_WAKEUP_DONE(m);
+
+                               rc = KERN_SUCCESS;
+                               goto data_requested;
+                       }
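
The decompression above follows a drop-lock/call/relock shape: the object lock can't be held across vm_compressor_pager_get(), so it is released for the call and retaken before the page's state is updated from the return code. A user-space sketch with a pthread mutex standing in for the object lock and a trivial stand-in decompressor:

#include <pthread.h>
#include <string.h>

static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for vm_compressor_pager_get(): fills the page, returns 0. */
static int decompress_into(char *dst, size_t len)
{
    memset(dst, 0xAB, len);
    return 0;
}

/* Object lock is held on entry, dropped across the slow call, retaken
 * before the caller-visible state (here, `dirty`) is updated. */
static int get_compressed_page(char *dst, size_t len, int *dirty)
{
    pthread_mutex_unlock(&object_lock);
    int rc = decompress_into(dst, len);
    pthread_mutex_lock(&object_lock);
    *dirty = (rc == 0);                 /* KERN_SUCCESS => mark dirty */
    return rc;
}

int main(void)
{
    char page[4096];
    int dirty;

    pthread_mutex_lock(&object_lock);   /* locked, as on entry above */
    get_compressed_page(page, sizeof page, &dirty);
    pthread_mutex_unlock(&object_lock);
    return dirty ? 0 : 1;
}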
+                       my_fault_type = DBG_PAGEIN_FAULT;
+               
+                       if (m != VM_PAGE_NULL) {
+                               VM_PAGE_FREE(m);
+                               m = VM_PAGE_NULL;
                        }
 
 #if TRACEFAULTPAGE
@@ -1295,9 +1633,48 @@ vm_fault_page(
 
                        XPR(XPR_VM_FAULT,
                            "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
-                               (integer_t)object, offset, (integer_t)m,
+                               object, offset, m,
                                access_required | wants_copy_flag, 0);
 
+                       if (object->copy == first_object) {
+                               /*
+                                * if we issue the memory_object_data_request in
+                                * this state, we are subject to a deadlock with
+                                * the underlying filesystem if it is trying to
+                                * shrink the file resulting in a push of pages
+                                * into the copy object...  that push will stall
+                                * on the placeholder page, and if the pushing thread
+                                * is holding a lock that is required on the pagein
+                                * path (such as a truncate lock), we'll deadlock...
+                                * to avoid this potential deadlock, we throw away
+                                * our placeholder page before calling memory_object_data_request
+                                * and force this thread to retry the vm_fault_page after
+                                * we have issued the I/O.  the second time through this path
+                                * we will find the page already in the cache (presumably still
+                                * busy waiting for the I/O to complete) and then complete
+                                * the fault w/o having to go through memory_object_data_request again
+                                */
+                               assert(first_m != VM_PAGE_NULL);
+                               assert(first_m->object == first_object);
+                                       
+                               vm_object_lock(first_object);
+                               VM_PAGE_FREE(first_m);
+                               vm_object_paging_end(first_object);
+                               vm_object_unlock(first_object);
+
+                               first_m = VM_PAGE_NULL;
+                               force_fault_retry = TRUE;
+
+                               vm_fault_page_forced_retry++;
+                       }
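
A purely illustrative miniature of the retry flow this sets up: the first pass gives up its placeholder page, lets the I/O be issued, and returns VM_FAULT_RETRY; the second pass finds the page already in the cache (still busy with the in-flight I/O) and completes without a second memory_object_data_request:

#include <stdbool.h>
#include <stdio.h>

enum fault_ret { FAULT_SUCCESS, FAULT_RETRY };

static bool page_cached = false;   /* stand-in for the object's cache */

static enum fault_ret fault_once(void)
{
    if (page_cached)
        return FAULT_SUCCESS;      /* second pass: page found, wait on it */

    /* first pass: free the placeholder, issue the data request */
    page_cached = true;            /* in-flight I/O populates the cache */
    return FAULT_RETRY;            /* force_fault_retry == TRUE */
}

int main(void)
{
    while (fault_once() == FAULT_RETRY)
        ;                          /* caller loops on VM_FAULT_RETRY */
    puts("fault completed on retry");
    return 0;
}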
+
+                       if (data_already_requested == TRUE) {
+                               orig_behavior = fault_info->behavior;
+                               orig_cluster_size = fault_info->cluster_size;
+
+                               fault_info->behavior = VM_BEHAVIOR_RANDOM;
+                               fault_info->cluster_size = PAGE_SIZE;
+                       }
                        /*
                         * Call the memory manager to retrieve the data.
                         */
@@ -1308,11 +1685,18 @@ vm_fault_page(
                                access_required | wants_copy_flag,
                                (memory_object_fault_info_t)fault_info);
 
+                       if (data_already_requested == TRUE) {
+                               fault_info->behavior = orig_behavior;
+                               fault_info->cluster_size = orig_cluster_size;
+                       } else
+                               data_already_requested = TRUE;
+
 #if TRACEFAULTPAGE
                        dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
 #endif
                        vm_object_lock(object);
 
+               data_requested:
                        if (rc != KERN_SUCCESS) {
 
                                vm_fault_cleanup(object, first_m);
@@ -1321,14 +1705,30 @@ vm_fault_page(
                                return ((rc == MACH_SEND_INTERRUPTED) ?
                                        VM_FAULT_INTERRUPTED :
                                        VM_FAULT_MEMORY_ERROR);
+                       } else {
+                               clock_sec_t     tv_sec;
+                               clock_usec_t    tv_usec;
+
+                               if (my_fault_type == DBG_PAGEIN_FAULT) {
+                                       clock_get_system_microtime(&tv_sec, &tv_usec);
+                                       current_thread()->t_page_creation_time = tv_sec;
+                                       current_thread()->t_page_creation_count = 0;
+                               }
                        }
-                       if ((interruptible != THREAD_UNINT) && (current_thread()->sched_mode & TH_MODE_ABORT)) {
+                       if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
 
                                vm_fault_cleanup(object, first_m);
                                thread_interrupt_level(interruptible_state);
 
                                return (VM_FAULT_INTERRUPTED);
                        }
+                       if (force_fault_retry == TRUE) {
+
+                               vm_fault_cleanup(object, first_m);
+                               thread_interrupt_level(interruptible_state);
+
+                               return (VM_FAULT_RETRY);
+                       }
                        if (m == VM_PAGE_NULL && object->phys_contiguous) {
                                /*
                                 * No page here means that the object we
@@ -1343,14 +1743,15 @@ vm_fault_page(
                                 * page fault against the object's new backing
                                 * store (different memory object).
                                 */
-                               break;
+                       phys_contig_object:
+                               goto done;
                        }
                        /*
                         * potentially a pagein fault
                         * if we make it through the state checks
                         * above, then we'll count it as such
                         */
-                       my_fault = DBG_PAGEIN_FAULT;
+                       my_fault = my_fault_type;
 
                        /*
                         * Retry with same object/offset, since new data may
@@ -1359,7 +1760,7 @@ vm_fault_page(
                         */
                        continue;
                }
-
+dont_look_for_page:
                /*
                 * We get here if the object has no pager, or an existence map 
                 * exists and indicates the page isn't present on the pager
@@ -1377,8 +1778,8 @@ vm_fault_page(
 
                XPR(XPR_VM_FAULT,
                    "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
-                       (integer_t)object, offset, (integer_t)m,
-                       (integer_t)object->shadow, 0);
+                       object, offset, m,
+                       object->shadow, 0);
 
                next_object = object->shadow;
 
@@ -1426,6 +1827,8 @@ vm_fault_page(
                        }
                        my_fault = vm_fault_zero_page(m, no_zero_fill);
 
+                       if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
+                               m->absent = TRUE;
                        break;
 
                } else {
@@ -1436,9 +1839,9 @@ vm_fault_page(
                        if ((object != first_object) || must_be_resident)
                                vm_object_paging_end(object);
 
-                       offset += object->shadow_offset;
-                       fault_info->lo_offset += object->shadow_offset;
-                       fault_info->hi_offset += object->shadow_offset;
+                       offset += object->vo_shadow_offset;
+                       fault_info->lo_offset += object->vo_shadow_offset;
+                       fault_info->hi_offset += object->vo_shadow_offset;
                        access_required = VM_PROT_READ;
 
                        vm_object_lock(next_object);
@@ -1471,12 +1874,10 @@ vm_fault_page(
        dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
 #endif
 #if    EXTRA_ASSERTIONS
-       if (m != VM_PAGE_NULL) {
-               assert(m->busy && !m->absent);
-               assert((first_m == VM_PAGE_NULL) ||
-                       (first_m->busy && !first_m->absent &&
-                        !first_m->active && !first_m->inactive));
-       }
+       assert(m->busy && !m->absent);
+       assert((first_m == VM_PAGE_NULL) ||
+              (first_m->busy && !first_m->absent &&
+               !first_m->active && !first_m->inactive));
 #endif /* EXTRA_ASSERTIONS */
 
        /*
@@ -1484,14 +1885,12 @@ vm_fault_page(
         * If we found a page, we must have decrypted it before we
         * get here...
         */
-       if (m != VM_PAGE_NULL) {
-               ASSERT_PAGE_DECRYPTED(m);
-       }
+       ASSERT_PAGE_DECRYPTED(m);
 
        XPR(XPR_VM_FAULT,
            "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
-               (integer_t)object, offset, (integer_t)m,
-               (integer_t)first_object, (integer_t)first_m);
+               object, offset, m,
+               first_object, first_m);
 
        /*
         * If the page is being written, but isn't
@@ -1499,7 +1898,7 @@ vm_fault_page(
         * we have to copy it into a new page owned
         * by the top-level object.
         */
-       if ((object != first_object) && (m != VM_PAGE_NULL)) {
+       if (object != first_object) {
 
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
@@ -1563,8 +1962,8 @@ vm_fault_page(
                        }
                        XPR(XPR_VM_FAULT,
                            "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
-                               (integer_t)object, offset,
-                               (integer_t)m, (integer_t)copy_m, 0);
+                               object, offset,
+                               m, copy_m, 0);
 
                        vm_page_copy(m, copy_m);
 
@@ -1588,7 +1987,8 @@ vm_fault_page(
                        /*
                         * We no longer need the old page or object.
                         */
-                       PAGE_WAKEUP_DONE(m);
+                       RELEASE_PAGE(m);
+
                        vm_object_paging_end(object);
                        vm_object_unlock(object);
 
@@ -1614,7 +2014,7 @@ vm_fault_page(
                         */
                        assert(copy_m->busy);
                        vm_page_insert(copy_m, object, offset);
-                       copy_m->dirty = TRUE;
+                       SET_PAGE_DIRTY(copy_m, TRUE);
 
                        m = copy_m;
                        /*
@@ -1639,7 +2039,7 @@ vm_fault_page(
         */
        try_failed_count = 0;
 
-       while ((copy_object = first_object->copy) != VM_OBJECT_NULL && (m != VM_PAGE_NULL)) {
+       while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
                vm_object_offset_t      copy_offset;
                vm_page_t               copy_m;
 
@@ -1687,9 +2087,9 @@ vm_fault_page(
                /*
                 * Does the page exist in the copy?
                 */
-               copy_offset = first_offset - copy_object->shadow_offset;
+               copy_offset = first_offset - copy_object->vo_shadow_offset;
 
-               if (copy_object->size <= copy_offset)
+               if (copy_object->vo_size <= copy_offset)
                        /*
                         * Copy object doesn't cover this page -- do nothing.
                         */
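
A worked example of the bounds test above, with illustrative numbers:

/*
 * Suppose vo_shadow_offset = 0x4000 and vo_size = 0x2000:
 *
 *   first_offset 0x5000  ->  copy_offset 0x1000   (< vo_size: covered)
 *   first_offset 0x7000  ->  copy_offset 0x3000   (>= vo_size: not covered,
 *                                                  nothing to push)
 *
 * The subtraction is unsigned, so a first_offset below vo_shadow_offset
 * wraps around to a huge copy_offset and also fails the vo_size test.
 */
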
@@ -1819,6 +2219,7 @@ vm_fault_page(
 #if MACH_PAGEMAP
                            || vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT
 #endif
+                           || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
                            ) {
 
                                vm_page_lockspin_queues();
@@ -1826,17 +2227,81 @@ vm_fault_page(
                                vm_page_activate(copy_m);
                                vm_page_unlock_queues();
 
-                               copy_m->dirty = TRUE;
+                               SET_PAGE_DIRTY(copy_m, TRUE);
                                PAGE_WAKEUP_DONE(copy_m);
-                       } 
-                       else {
+
+                                * For internal objects, check with the pager to see
+                                  (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE)) {
+                               /*
+                                * For internal objects check with the pager to see
+                                * if the page already exists in the backing store.
+                                * If yes, then we can drop the copy page. If not,
+                                * then we'll activate it, mark it dirty and keep it
+                                * around.
+                                */
+                               
+                               kern_return_t kr = KERN_SUCCESS;
+
+                               memory_object_t copy_pager = copy_object->pager;
+                               assert(copy_pager != MEMORY_OBJECT_NULL);
+                               vm_object_paging_begin(copy_object);
+
+                               vm_object_unlock(copy_object);
+
+                               kr = memory_object_data_request(
+                                       copy_pager,
+                                       copy_offset + copy_object->paging_offset,
+                                       0, /* Only query the pager. */
+                                       VM_PROT_READ,
+                                       NULL);
+                               
+                               vm_object_lock(copy_object);
+
+                               vm_object_paging_end(copy_object);
+
+                               /*
+                                * Since we dropped the copy_object's lock,
+                                * check whether we'll have to deallocate 
+                                * the hard way.
+                                */
+                               if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
+                                       vm_object_unlock(copy_object);
+                                       vm_object_deallocate(copy_object);
+                                       vm_object_lock(object);
+
+                                       continue;
+                               }
+                               if (kr == KERN_SUCCESS) {
+                                       /*
+                                        * The pager has the page. We don't want to overwrite
+                                        * that page by sending this one out to the backing store.
+                                        * So we drop the copy page.
+                                        */
+                                       VM_PAGE_FREE(copy_m);
+
+                               } else {
+                                       /*
+                                        * The pager doesn't have the page. We'll keep this one
+                                        * around in the copy object. It might get sent out to 
+                                        * the backing store under memory pressure.      
+                                        */
+                                       vm_page_lockspin_queues();
+                                       assert(!m->cleaning);
+                                       vm_page_activate(copy_m);
+                                       vm_page_unlock_queues();
+
+                                       SET_PAGE_DIRTY(copy_m, TRUE);
+                                       PAGE_WAKEUP_DONE(copy_m);
+                               } 
+                       } else {
+                               
                                assert(copy_m->busy == TRUE);
                                assert(!m->cleaning);
 
                                /*
                                 * dirty is protected by the object lock
                                 */
-                               copy_m->dirty = TRUE;
+                               SET_PAGE_DIRTY(copy_m, TRUE);
 
                                /*
                                 * The page is already ready for pageout:
@@ -1874,6 +2339,7 @@ vm_fault_page(
                                 */
                                vm_object_lock(object);
                        }
+
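
The zero-length data request above is the conventional way to ask a pager whether it holds a copy of a page without triggering an actual page-in. The probe in isolation (a sketch: the paging_begin/end bracket and the paging_offset adjustment done by the real code are left to the caller):

/* Ask a pager whether it holds a page, without paging it in. */
static boolean_t
pager_has_backing_copy(memory_object_t pager, memory_object_offset_t offset)
{
        kern_return_t kr;

        kr = memory_object_data_request(pager,
                                        offset,
                                        0,            /* length 0: query only */
                                        VM_PROT_READ,
                                        NULL);        /* no fault info */

        return (kr == KERN_SUCCESS);  /* SUCCESS: the pager has the page */
}
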
                        /*
                         * Because we're pushing a page upward
                         * in the object tree, we must restart
@@ -1904,25 +2370,31 @@ vm_fault_page(
 
                break;
        }
+
+done:
        *result_page = m;
        *top_page = first_m;
 
        XPR(XPR_VM_FAULT,
                "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
-               (integer_t)object, offset, (integer_t)m, (integer_t)first_m, 0);
+               object, offset, m, first_m, 0);
 
        if (m != VM_PAGE_NULL) {
+               retval = VM_FAULT_SUCCESS;
                if (my_fault == DBG_PAGEIN_FAULT) {
 
-                       VM_STAT_INCR(pageins);
+                       if (!m->object->internal || (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE))
+                               VM_STAT_INCR(pageins);
                        DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);
                        DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
                        current_task()->pageins++;
 
                        if (m->object->internal) {
                                DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);
+                               my_fault = DBG_PAGEIND_FAULT;
                        } else {
                                DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);
+                               my_fault = DBG_PAGEINV_FAULT;
                        }
 
                        /*
@@ -1933,18 +2405,24 @@ vm_fault_page(
                        vm_fault_is_sequential(object, offset, fault_info->behavior);
 
                        vm_fault_deactivate_behind(object, offset, fault_info->behavior);
+               } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
+
+                       VM_STAT_INCR(decompressions);
                }
                if (type_of_fault)
                        *type_of_fault = my_fault;
-       } else
-               vm_object_unlock(object);
+       } else {
+               retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
+               assert(first_m == VM_PAGE_NULL);
+               assert(object == first_object);
+       }
 
        thread_interrupt_level(interruptible_state);
 
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);       /* (TEST/DEBUG) */
 #endif
-       return (VM_FAULT_SUCCESS);
+       return retval;
 
 backoff:
        thread_interrupt_level(interruptible_state);
@@ -1958,6 +2436,21 @@ backoff:
 
 
 
+/*
+ * CODE SIGNING:
+ * When soft faulting a page, we have to validate the page if:
+ * 1. the page is being mapped in user space
+ * 2. the page hasn't already been found to be "tainted"
+ * 3. the page belongs to a code-signed object
+ * 4. the page has not been validated yet or has been mapped for write.
+ */
+#define VM_FAULT_NEED_CS_VALIDATION(pmap, page)                                \
+       ((pmap) != kernel_pmap /*1*/ &&                                 \
+        !(page)->cs_tainted /*2*/ &&                                   \
+        (page)->object->code_signed /*3*/ &&                           \
+        (!(page)->cs_validated || (page)->wpmapped /*4*/))
+
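
Unfolded into a function, the macro reads as four early-out tests (hypothetical rewrite for readability; the kernel keeps it a macro so it evaluates inline under whatever object lock the caller already holds):

static boolean_t
fault_needs_cs_validation(pmap_t pmap, vm_page_t page)
{
        if (pmap == kernel_pmap)          /* 1. kernel mappings are exempt */
                return FALSE;
        if (page->cs_tainted)             /* 2. already known to be bad */
                return FALSE;
        if (!page->object->code_signed)   /* 3. object isn't signed at all */
                return FALSE;
        /* 4. never validated, or possibly modified via a writable mapping */
        return (!page->cs_validated || page->wpmapped);
}
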
+
 /*
  * page queue lock must NOT be held
  * m->object must be locked
@@ -1967,6 +2460,8 @@ backoff:
  * careful not to modify the VM object in any way that is not
  * legal under a shared lock...
  */
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
 unsigned long cs_enter_tainted_rejected = 0;
 unsigned long cs_enter_tainted_accepted = 0;
 kern_return_t
@@ -1974,18 +2469,23 @@ vm_fault_enter(vm_page_t m,
               pmap_t pmap,
               vm_map_offset_t vaddr,
               vm_prot_t prot,
+              vm_prot_t fault_type,
               boolean_t wired,
               boolean_t change_wiring,
               boolean_t no_cache,
+              boolean_t cs_bypass,
+              boolean_t *need_retry,
               int *type_of_fault)
 {
-       unsigned int    cache_attr;
-       kern_return_t   kr;
+       kern_return_t   kr, pe_result;
        boolean_t       previously_pmapped = m->pmapped;
-
+       boolean_t       must_disconnect = 0;
+       boolean_t       map_is_switched, map_is_switch_protected;
+       int             cs_enforcement_enabled;
+       
        vm_object_lock_assert_held(m->object);
 #if DEBUG
-       mutex_assert(&vm_page_queue_lock, MA_NOTOWNED);
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
 #endif /* DEBUG */
 
        if (m->phys_page == vm_page_guard_addr) {
@@ -1993,37 +2493,25 @@ vm_fault_enter(vm_page_t m,
                return KERN_SUCCESS;
        }
 
-        cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+       if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
 
-       if (m->object->code_signed && pmap != kernel_pmap &&
-           (!m->cs_validated || m->wpmapped)) {
                vm_object_lock_assert_exclusive(m->object);
 
-               if (m->cs_validated && m->wpmapped) {
-                       vm_cs_revalidates++;
-               }
-
+       } else if ((fault_type & VM_PROT_WRITE) == 0) {
                /*
-                * CODE SIGNING:
-                * This page comes from a VM object backed by a signed
-                * memory object.  We are about to enter it into a process
-                * address space, so we need to validate its signature.
+                * This is not a "write" fault, so we
+                * might not have taken the object lock
+                * exclusively and we might not be able
+                * to update the "wpmapped" bit in
+                * vm_fault_enter().
+                * Let's just grant read access to
+                * the page for now and we'll
+                * soft-fault again if we need write
+                * access later...
                 */
-               /* VM map is locked, so 1 ref will remain on VM object */
-               vm_page_validate_cs(m);
-       }
-
+               prot &= ~VM_PROT_WRITE;
+       }       
        if (m->pmapped == FALSE) {
-               /*
-                * This is the first time this page is being
-                * mapped in an address space (pmapped == FALSE).
-                *
-                * Part of that page may still be in the data cache
-                * and not flushed to memory.  In case we end up
-                * accessing that page via the instruction cache,
-                * we need to ensure that the 2 caches are in sync.
-                */
-               pmap_sync_page_data_phys(m->phys_page);
 
                if ((*type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
                        /*
@@ -2037,18 +2525,17 @@ vm_fault_enter(vm_page_t m,
 
                        if (m->object->internal) {
                                DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);
+                               *type_of_fault = DBG_PAGEIND_FAULT;
                        } else {
                                DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);
+                               *type_of_fault = DBG_PAGEINV_FAULT;
                        }
 
                        current_task()->pageins++;
-
-                       *type_of_fault = DBG_PAGEIN_FAULT;
                }
                VM_PAGE_CONSUME_CLUSTERED(m);
 
-       } else if (cache_attr != VM_WIMG_DEFAULT)
-               pmap_sync_page_attributes_phys(m->phys_page);
+       }
 
        if (*type_of_fault != DBG_COW_FAULT) {
                DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
@@ -2058,108 +2545,549 @@ vm_fault_enter(vm_page_t m,
                }
        }
 
-       if (m->cs_tainted) {
-               /*
-                * CODE SIGNING:
-                * This page has been tainted and can not be trusted.
-                * Let's notify the current process and let it take any
-                * necessary precautions before we enter the tainted page
-                * into its address space.
-                */
-               if (cs_invalid_page()) {
+       /* Validate code signature if necessary. */
+       if (VM_FAULT_NEED_CS_VALIDATION(pmap, m)) {
+               vm_object_lock_assert_exclusive(m->object);
+
+               if (m->cs_validated) {
+                       vm_cs_revalidates++;
+               }
+
+               /* VM map is locked, so 1 ref will remain on the VM object -
+                * no harm if vm_page_validate_cs drops the object lock */
+               vm_page_validate_cs(m);
+       }
+
+#define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
+
+       map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
+                          (pmap == vm_map_pmap(current_thread()->map)));
+       map_is_switch_protected = current_thread()->map->switch_protect;
+       
+       /* If the map is switched, and is switch-protected, we must protect
+        * some pages from being write-faulted: immutable pages because by 
+        * definition they may not be written, and executable pages because that
+        * would provide a way to inject unsigned code.
+        * If the page is immutable, we can simply return. However, we can't
+        * immediately determine whether a page is executable anywhere. But,
+        * we can disconnect it everywhere and remove the executable protection
+        * from the current map. We do that below right before we do the 
+        * PMAP_ENTER.
+        */
+       cs_enforcement_enabled = cs_enforcement(NULL);
+
+       if(cs_enforcement_enabled && map_is_switched && 
+          map_is_switch_protected && page_immutable(m, prot) && 
+          (prot & VM_PROT_WRITE))
+       {
+               return KERN_CODESIGN_ERROR;
+       }
+
+       /* A page could be tainted, or pose a risk of being tainted later.
+        * Check whether the receiving process wants it, and make it feel
+        * the consequences (that happens in cs_invalid_page()).
+        * For CS Enforcement, two other conditions will 
+        * cause that page to be tainted as well: 
+        * - pmapping an unsigned page executable - this means unsigned code;
+        * - writeable mapping of a validated page - the content of that page
+        *   can be changed without the kernel noticing, therefore unsigned
+        *   code can be created
+        */
+       if (m->cs_tainted ||
+           ((cs_enforcement_enabled && !cs_bypass ) &&
+            (/* The page is unsigned and wants to be executable */
+             (!m->cs_validated && (prot & VM_PROT_EXECUTE))  ||
+             /* The page should be immutable, but is in danger of being modified
+               * This is the case where we want policy from the code directory -
+               * is the page immutable or not? For now we have to assume that 
+               * code pages will be immutable, data pages not.
+               * We'll assume a page is a code page if it has a code directory 
+               * and we fault for execution.
+               * That is good enough since if we faulted the code page for
+               * writing in another map before, it is wpmapped; if we fault
+               * it for writing in this map later it will also be faulted for executing 
+               * at the same time; and if we fault for writing in another map
+               * later, we will disconnect it from this pmap so we'll notice
+               * the change.
+               */
+             (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped))
+             ))
+               ) 
+       {
+               /* We will have a tainted page. Have to handle the special case
+                * of a switched map now. If the map is not switched, standard
+                * procedure applies - call cs_invalid_page().
+                * If the map is switched, the real owner is invalid already.
+                * There is no point in invalidating the switching process since
+                * it will not be executing from the map. So we don't call
+                * cs_invalid_page() in that case. */
+               boolean_t reject_page;
+               if(map_is_switched) { 
+                       assert(pmap==vm_map_pmap(current_thread()->map));
+                       assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
+                       reject_page = FALSE;
+               } else {
+                       if (cs_debug > 5)
+                               printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n", 
+                                      m->object->code_signed ? "yes" : "no",
+                                      m->cs_validated ? "yes" : "no",
+                                      m->cs_tainted ? "yes" : "no",
+                                      m->wpmapped ? "yes" : "no",
+                                      m->slid ? "yes" : "no",
+                                      (int)prot);
+                       reject_page = cs_invalid_page((addr64_t) vaddr);
+               }
+               
+               if (reject_page) {
                        /* reject the tainted page: abort the page fault */
-                       kr = KERN_MEMORY_ERROR;
+                       int                     pid;
+                       const char              *procname;
+                       task_t                  task;
+                       vm_object_t             file_object, shadow;
+                       vm_object_offset_t      file_offset;
+                       char                    *pathname, *filename;
+                       vm_size_t               pathname_len, filename_len;
+                       boolean_t               truncated_path;
+#define __PATH_MAX 1024
+                       struct timespec         mtime, cs_mtime;
+
+                       kr = KERN_CODESIGN_ERROR;
                        cs_enter_tainted_rejected++;
+
+                       /* get process name and pid */
+                       procname = "?";
+                       task = current_task();
+                       pid = proc_selfpid();
+                       if (task->bsd_info != NULL)
+                               procname = proc_name_address(task->bsd_info);
+
+                       /* get file's VM object */
+                       file_object = m->object;
+                       file_offset = m->offset;
+                       for (shadow = file_object->shadow;
+                            shadow != VM_OBJECT_NULL;
+                            shadow = file_object->shadow) {
+                               vm_object_lock_shared(shadow);
+                               if (file_object != m->object) {
+                                       vm_object_unlock(file_object);
+                               }
+                               file_offset += file_object->vo_shadow_offset;
+                               file_object = shadow;
+                       }
+
+                       mtime.tv_sec = 0;
+                       mtime.tv_nsec = 0;
+                       cs_mtime.tv_sec = 0;
+                       cs_mtime.tv_nsec = 0;
+
+                       /* get file's pathname and/or filename */
+                       pathname = NULL;
+                       filename = NULL;
+                       pathname_len = 0;
+                       filename_len = 0;
+                       truncated_path = FALSE;
+                       if (file_object->pager == NULL) {
+                               /* no pager -> no file -> no pathname */
+                               pathname = (char *) "<nil>";
+                       } else {
+                               pathname = (char *)kalloc(__PATH_MAX * 2);
+                               if (pathname) {
+                                       pathname_len = __PATH_MAX;
+                                       filename = pathname + pathname_len;
+                                       filename_len = __PATH_MAX;
+                               }
+                               vnode_pager_get_object_name(file_object->pager,
+                                                           pathname,
+                                                           pathname_len,
+                                                           filename,
+                                                           filename_len,
+                                                           &truncated_path);
+                               vnode_pager_get_object_mtime(file_object->pager,
+                                                            &mtime,
+                                                            &cs_mtime);
+                       }
+                       printf("CODE SIGNING: process %d[%s]: "
+                              "rejecting invalid page at address 0x%llx "
+                              "from offset 0x%llx in file \"%s%s%s\" "
+                              "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+                              "(signed:%d validated:%d tainted:%d "
+                              "wpmapped:%d slid:%d)\n",
+                              pid, procname, (addr64_t) vaddr,
+                              file_offset,
+                              pathname,
+                              (truncated_path ? "/.../" : ""),
+                              (truncated_path ? filename : ""),
+                              cs_mtime.tv_sec, cs_mtime.tv_nsec,
+                              ((cs_mtime.tv_sec == mtime.tv_sec &&
+                                cs_mtime.tv_nsec == mtime.tv_nsec)
+                               ? "=="
+                               : "!="),
+                              mtime.tv_sec, mtime.tv_nsec,
+                              m->object->code_signed,
+                              m->cs_validated,
+                              m->cs_tainted,
+                              m->wpmapped,
+                              m->slid);
+                       if (file_object != m->object) {
+                               vm_object_unlock(file_object);
+                       }
+                       if (pathname_len != 0) {
+                               kfree(pathname, __PATH_MAX * 2);
+                               pathname = NULL;
+                               filename = NULL;
+                       }
                } else {
                        /* proceed with the tainted page */
                        kr = KERN_SUCCESS;
+                       /* Page might have been tainted before or not; now it
+                        * definitively is. If the page wasn't tainted, we must
+                        * disconnect it from all pmaps later. */
+                       must_disconnect = !m->cs_tainted;
+                       m->cs_tainted = TRUE;
                        cs_enter_tainted_accepted++;
                }
-               if (cs_debug || kr != KERN_SUCCESS) {
-                       printf("CODESIGNING: vm_fault_enter(0x%llx): "
-                              "page %p obj %p off 0x%llx *** TAINTED ***\n",
-                              (long long)vaddr, m, m->object, m->offset);
+               if (kr != KERN_SUCCESS) {
+                       if (cs_debug) {
+                               printf("CODESIGNING: vm_fault_enter(0x%llx): "
+                                      "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
+                                      (long long)vaddr, m, m->object, m->offset);
+                       }
+#if !SECURE_KERNEL
+                       if (cs_enforcement_panic) {
+                               panic("CODESIGNING: panicking on invalid page\n");
+                       }
+#endif
                }
+               
        } else {
                /* proceed with the valid page */
                kr = KERN_SUCCESS;
        }
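
The net effect of the block above, summarized as a decision tree (conditions abbreviated; the code has the exact tests):

/*
 * page tainted, or enforcement about to treat it as tainted?
 *  |- map switched: skip cs_invalid_page(), fall through to "accept"
 *  |- cs_invalid_page() rejects: kr = KERN_CODESIGN_ERROR, log the
 *  |    process and file details, and the fault will be aborted
 *  `- cs_invalid_page() accepts: kr = KERN_SUCCESS, the page is marked
 *       cs_tainted, and must_disconnect is noted so every other pmap
 *       mapping can be torn down before the PMAP_ENTER below
 * otherwise: kr = KERN_SUCCESS, proceed with the valid page
 */
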
 
-       if (kr == KERN_SUCCESS) {
-               /*
-                * NOTE: we may only hold the vm_object lock SHARED
-                * at this point, but the update of pmapped is ok
-                * since this is the ONLY bit updated behind the SHARED
-                * lock... however, we need to figure out how to do an atomic
-                * update on a bit field to make this less fragile... right
-                * now I don'w know how to coerce 'C' to give me the offset info
-                * that's needed for an AtomicCompareAndSwap
-                */
-               m->pmapped = TRUE;
-               if (prot & VM_PROT_WRITE) {
-                       vm_object_lock_assert_exclusive(m->object);
-                       m->wpmapped = TRUE;
-               }
-
-               PMAP_ENTER(pmap, vaddr, m, prot, cache_attr, wired);
-       }
+       boolean_t       page_queues_locked = FALSE;
+#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()  \
+MACRO_BEGIN                                    \
+       if (! page_queues_locked) {             \
+               page_queues_locked = TRUE;      \
+               vm_page_lockspin_queues();      \
+       }                                       \
+MACRO_END
+#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()    \
+MACRO_BEGIN                                    \
+       if (page_queues_locked) {               \
+               page_queues_locked = FALSE;     \
+               vm_page_unlock_queues();        \
+       }                                       \
+MACRO_END
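
The pair implements lazy lock acquisition: paths that never touch the page queues never pay for the lock, repeated "take" requests are idempotent, and the single unlock at the end is a no-op if the lock was never taken. Usage shape (illustrative):

        boolean_t page_queues_locked = FALSE;   /* flag the macros test and set */

        if (path_needs_queue_work)              /* hypothetical condition */
                __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();  /* locks on first use */

        /* ... more conditional work; the lock is taken at most once ... */

        __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();    /* no-op if never locked */
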
 
        /*
         * Hold queues lock to manipulate
         * the page queues.  Change wiring
         * case is obvious.
         */
-       if (change_wiring) {
-               vm_page_lockspin_queues();
+       assert(m->compressor || m->object != compressor_object);
+       if (m->compressor) {
+               /*
+                * Compressor pages are neither wired
+                * nor pageable and should never change.
+                */
+               assert(m->object == compressor_object);
+       } else if (change_wiring) {
+               __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
 
                if (wired) {
                        if (kr == KERN_SUCCESS) {
                                vm_page_wire(m);
                        }
                } else {
-                       vm_page_unwire(m);
+                       vm_page_unwire(m, TRUE);
                }
-               vm_page_unlock_queues();
+               /* we keep the page queues lock, if we need it later */
 
        } else {
                if (kr != KERN_SUCCESS) {
-                       vm_page_lock_queues();
+                       __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
                        vm_page_deactivate(m);
-                       vm_page_unlock_queues();
-               } else {
-                       if (((!m->active && !m->inactive) || no_cache) && !m->wire_count && !m->throttled) {
-                               vm_page_lockspin_queues();
+                       /* we keep the page queues lock, if we need it later */
+               } else if (((!m->active && !m->inactive) ||
+                           m->clean_queue ||
+                           no_cache) &&
+                          !VM_PAGE_WIRED(m) && !m->throttled) {
+
+                       if (vm_page_local_q &&
+                           !no_cache &&
+                           (*type_of_fault == DBG_COW_FAULT ||
+                            *type_of_fault == DBG_ZERO_FILL_FAULT) ) {
+                               struct vpl      *lq;
+                               uint32_t        lid;
+
+                               __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+                               vm_object_lock_assert_exclusive(m->object);
+
                                /*
-                                * test again now that we hold the page queue lock
+                                * we got a local queue to stuff this
+                                * new page on...
+                                * it's safe to manipulate local and
+                                * local_id at this point since we're
+                                * behind an exclusive object lock and
+                                * the page is not on any global queue.
+                                *
+                                * we'll use the current cpu number to
+                                * select the queue; note that we don't
+                                * need to disable preemption... we're
+                                * going to be behind the local queue's
+                                * lock to do the real work
                                 */
-                               if (((!m->active && !m->inactive) || no_cache) && !m->wire_count) {
+                               lid = cpu_number();
+
+                               lq = &vm_page_local_q[lid].vpl_un.vpl;
+
+                               VPL_LOCK(&lq->vpl_lock);
 
+                               queue_enter(&lq->vpl_queue, m,
+                                           vm_page_t, pageq);
+                               m->local = TRUE;
+                               m->local_id = lid;
+                               lq->vpl_count++;
+                                       
+                               if (m->object->internal)
+                                       lq->vpl_internal_count++;
+                               else
+                                       lq->vpl_external_count++;
+
+                               VPL_UNLOCK(&lq->vpl_lock);
+
+                               if (lq->vpl_count > vm_page_local_q_soft_limit)
+                               {
                                        /*
-                                        * If this is a no_cache mapping and the page has never been
-                                        * mapped before or was previously a no_cache page, then we
-                                        * want to leave pages in the speculative state so that they
-                                        * can be readily recycled if free memory runs low.  Otherwise
-                                        * the page is activated as normal. 
+                                        * we're beyond the soft limit
+                                        * for the local queue
+                                        * vm_page_reactivate_local will
+                                        * 'try' to take the global page
+                                        * queue lock... if it can't
+                                        * that's ok... we'll let the
+                                        * queue continue to grow up
+                                        * to the hard limit... at that
+                                        * point we'll wait for the
+                                        * lock... once we've got the
+                                        * lock, we'll transfer all of
+                                        * the pages from the local
+                                        * queue to the global active
+                                        * queue
                                         */
+                                       vm_page_reactivate_local(lid, FALSE, FALSE);
+                               }
+                       } else {
+
+                               __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+                               /*
+                                * test again now that we hold the
+                                * page queue lock
+                                */
+                               if (!VM_PAGE_WIRED(m)) {
+                                       if (m->clean_queue) {
+                                               VM_PAGE_QUEUES_REMOVE(m);
+
+                                               vm_pageout_cleaned_reactivated++;
+                                               vm_pageout_cleaned_fault_reactivated++;
+                                       }
 
-                                       if (no_cache && (!previously_pmapped || m->no_cache)) {
-                                               m->no_cache = TRUE;
+                                       if ((!m->active &&
+                                            !m->inactive) ||
+                                           no_cache) {
+                                               /*
+                                                * If this is a no_cache mapping
+                                                * and the page has never been
+                                                * mapped before or was
+                                                * previously a no_cache page,
+                                                * then we want to leave pages
+                                                * in the speculative state so
+                                                * that they can be readily
+                                                * recycled if free memory runs
+                                                * low.  Otherwise the page is
+                                                * activated as normal. 
+                                                */
 
-                                               if (m->active || m->inactive)
-                                                       VM_PAGE_QUEUES_REMOVE(m);
+                                               if (no_cache &&
+                                                   (!previously_pmapped ||
+                                                    m->no_cache)) {
+                                                       m->no_cache = TRUE;
 
-                                               if (!m->speculative) 
-                                                       vm_page_speculate(m, TRUE);
+                                                       if (!m->speculative) 
+                                                               vm_page_speculate(m, FALSE);
 
-                                       } else if (!m->active && !m->inactive)
-                                               vm_page_activate(m);
+                                               } else if (!m->active &&
+                                                          !m->inactive) {
 
+                                                       vm_page_activate(m);
+                                               }
+                                       }
                                }
+                               /* we keep the page queues lock, if we need it later */
+                       }
+               }
+       }
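
The local-queue path above is a per-CPU staging pattern: freshly faulted zero-fill and COW pages are queued under a cheap per-CPU lock, and only when the soft limit is crossed does one thread attempt the global queue lock to drain the whole batch, bounding contention on the global lock. A sketch with hypothetical types:

struct cpu_q {                          /* hypothetical per-CPU queue */
        spinlock_t      lock;
        struct list     pages;
        unsigned        count;
};

static void
local_enqueue(struct cpu_q *q, struct page *p)
{
        spin_lock(&q->lock);            /* per-CPU: rarely contended */
        list_append(&q->pages, p);
        q->count++;
        spin_unlock(&q->lock);

        if (q->count > SOFT_LIMIT)      /* occasional batched drain */
                drain_to_global(q, TRUE /* trylock only */);
}
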
 
-                               vm_page_unlock_queues();
+       if ((prot & VM_PROT_EXECUTE) &&
+           ! m->xpmapped) {
+
+               __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+               /*
+                * xpmapped is protected by the page queues lock
+                * so it matters not that we might only hold the
+                * object lock in the shared state
+                */
+
+               if (! m->xpmapped) {
+
+                       m->xpmapped = TRUE;
+                       __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+
+                       if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
+                           m->object->internal &&
+                           m->object->pager != NULL) {
+                               /*
+                                * This page could have been
+                                * uncompressed by the
+                                * compressor pager and its
+                                * contents might be only in
+                                * the data cache.
+                                * Since it's being mapped for
+                                * "execute" for the first time,
+                                * make sure the icache is in
+                                * sync.
+                                */
+                               pmap_sync_page_data_phys(m->phys_page);
+                       }
+
+               }
+       }
+       /* we're done with the page queues lock, if we ever took it */
+       __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
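
The xpmapped update just above is a double-checked test: a cheap unlocked read first, then a re-test under the page queues lock (which protects the bit) before committing the one-way transition, so the icache sync runs at most once per page. The shape of it (lock helpers are shorthand, not kernel API):

        if (!m->xpmapped) {                     /* unlocked fast test */
                lock_queues();
                if (!m->xpmapped) {             /* re-check under the lock */
                        m->xpmapped = TRUE;     /* one-way transition */
                        unlock_queues();
                        sync_icache(m);         /* hypothetical: at most once */
                } else
                        unlock_queues();
        }
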
+
+
+       /* If we have a KERN_SUCCESS from the previous checks, we either have
+        * a good page, or a tainted page that has been accepted by the process.
+        * In both cases the page will be entered into the pmap.
+        * If the page is writeable, we need to disconnect it from other pmaps
+        * now so those processes can take note.
+        */
+       if (kr == KERN_SUCCESS) {
+               /*
+                * NOTE: we may only hold the vm_object lock SHARED
+                * at this point, but the update of pmapped is ok
+                * since this is the ONLY bit updated behind the SHARED
+                * lock... however, we need to figure out how to do an atomic
+                * update on a bit field to make this less fragile... right
+                * now I don't know how to coerce 'C' to give me the offset info
+                * that's needed for an AtomicCompareAndSwap
+                */
+               m->pmapped = TRUE;
+               if(vm_page_is_slideable(m)) {
+                       boolean_t was_busy = m->busy;
+
+                       vm_object_lock_assert_exclusive(m->object);
+
+                       m->busy = TRUE;
+                       kr = vm_page_slide(m, 0);
+                       assert(m->busy);
+                       if(!was_busy) {
+                               PAGE_WAKEUP_DONE(m);
+                       }
+                       if (kr != KERN_SUCCESS) {
+                               /*
+                                * This page has not been slid correctly,
+                                * do not do the pmap_enter() !
+                                * Let vm_fault_enter() return the error
+                                * so the caller can fail the fault.
+                                */
+                               goto after_the_pmap_enter;
+                       }
+               }
+
+               if (fault_type & VM_PROT_WRITE) {
+
+                       if (m->wpmapped == FALSE) {
+                               vm_object_lock_assert_exclusive(m->object);
+
+                               m->wpmapped = TRUE;
+                       }
+                       if (must_disconnect) {
+                               /*
+                                * We can only get here 
+                                * because of the CSE logic
+                                */
+                               assert(cs_enforcement_enabled);
+                               pmap_disconnect(m->phys_page);
+                               /* 
+                                * If we are faulting for a write, we can clear
+                                * the execute bit - that will ensure the page is
+                                * checked again before being executable, which
+                                * protects against a map switch.
+                                * This only happens the first time the page
+                                * gets tainted, so we won't get stuck here 
+                                * to make an already writeable page executable.
+                                */
+                               if (!cs_bypass){
+                                       prot &= ~VM_PROT_EXECUTE;
+                               }
                        }
                }
+
+               /* Prevent a deadlock by not holding the object lock
+                * if we need to wait for a page in pmap_enter()
+                * - <rdar://problem/7138958> */
+               PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0,
+                                 wired, PMAP_OPTIONS_NOWAIT, pe_result);
+
+               if(pe_result == KERN_RESOURCE_SHORTAGE) {
+
+                       if (need_retry) {
+                               /*
+                                * this will be non-null in the case where we hold the lock
+                                * on the top-object in this chain... we can't just drop
+                                * the lock on the object we're inserting the page into
+                                * and recall the PMAP_ENTER since we can still cause
+                                * a deadlock if one of the critical paths tries to 
+                                * acquire the lock on the top-object and we're blocked
+                                * in PMAP_ENTER waiting for memory... our only recourse
+                                * is to deal with it at a higher level where we can 
+                                * drop both locks.
+                                */
+                               *need_retry = TRUE;
+                               vm_pmap_enter_retried++;
+                               goto after_the_pmap_enter;
+                       }
+                       /* The nonblocking version of pmap_enter did not succeed
+                        * and we don't need to drop other locks and retry
+                        * at the level above us, so use the blocking version
+                        * instead. This requires marking the page busy and
+                        * unlocking the object. */
+                       boolean_t was_busy = m->busy;
+
+                       vm_object_lock_assert_exclusive(m->object);
+
+                       m->busy = TRUE;
+                       vm_object_unlock(m->object);
+                       
+                       PMAP_ENTER(pmap, vaddr, m, prot, fault_type, 0, wired);
+                               
+                       /* Take the object lock again. */
+                       vm_object_lock(m->object);
+                       
+                       /* If the page was busy, someone else will wake it up.
+                        * Otherwise, we have to do it now. */
+                       assert(m->busy);
+                       if(!was_busy) {
+                               PAGE_WAKEUP_DONE(m);
+                       }
+                       vm_pmap_enter_blocked++;
+               }
        }
+
+after_the_pmap_enter:
        return kr;
 }
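
The PMAP_ENTER_OPTIONS / PMAP_ENTER dance at the end of vm_fault_enter() is a general try-nonblocking-first pattern: attempt the operation with PMAP_OPTIONS_NOWAIT while the object lock is held, and on KERN_RESOURCE_SHORTAGE either hand the retry to a caller that can drop more locks, or pin the page busy, drop the object lock, and repeat the enter blocking. In general form (every helper here is hypothetical):

static kern_return_t
enter_with_fallback(lock_t *obj_lock, boolean_t *need_retry)
{
        kern_return_t kr;

        kr = try_enter_nowait();                /* hypothetical */
        if (kr != KERN_RESOURCE_SHORTAGE)
                return kr;

        if (need_retry != NULL) {
                *need_retry = TRUE;             /* caller drops its locks */
                return kr;                      /* ...and retries the fault */
        }
        mark_busy();                            /* pin state across unlock */
        unlock(obj_lock);
        kr = enter_blocking();                  /* may wait for memory */
        lock(obj_lock);
        wakeup_and_clear_busy();                /* hypothetical */
        return kr;
}
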
 
@@ -2183,6 +3111,7 @@ extern int _map_enter_debug;
 unsigned long vm_fault_collapse_total = 0;
 unsigned long vm_fault_collapse_skipped = 0;
 
+
 kern_return_t
 vm_fault(
        vm_map_t        map,
@@ -2217,27 +3146,34 @@ vm_fault(
        vm_prot_t               original_fault_type;
        struct vm_object_fault_info fault_info;
        boolean_t               need_collapse = FALSE;
+       boolean_t               need_retry = FALSE;
+       boolean_t               *need_retry_ptr = NULL;
        int                     object_lock_type = 0;
        int                     cur_object_lock_type;
+       vm_object_t             top_object = VM_OBJECT_NULL;
+       int                     throttle_delay;
 
 
-       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
-                             (int)((uint64_t)vaddr >> 32),
-                             (int)vaddr,
-                             0,
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+                     (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
+                             ((uint64_t)vaddr >> 32),
+                             vaddr,
+                             (map == kernel_map),
                              0,
                              0);
 
        if (get_preemption_level() != 0) {
-               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
-                                     (int)((uint64_t)vaddr >> 32),
-                                     (int)vaddr,
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+                                     (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
+                                     ((uint64_t)vaddr >> 32),
+                                     vaddr,
                                      KERN_FAILURE,
                                      0,
                                      0);
 
                return (KERN_FAILURE);
        }
+       
        interruptible_state = thread_interrupt_level(interruptible);
 
        VM_STAT_INCR(faults);
@@ -2279,6 +3215,10 @@ RetryFault:
        }
        pmap = real_map->pmap;
        fault_info.interruptible = interruptible;
+       fault_info.stealth = FALSE;
+       fault_info.io_sync = FALSE;
+       fault_info.mark_zf_absent = FALSE;
+       fault_info.batch_pmap_op = FALSE;
 
        /*
         * If the page is wired, we must fault for the current protection
@@ -2356,6 +3296,18 @@ RetryFault:
        cur_offset = offset;
 
        while (TRUE) {
+               if (!cur_object->pager_created &&
+                   cur_object->phys_contiguous) /* superpage */
+                       break;
+
+               if (cur_object->blocked_access) {
+                       /*
+                        * Access to this VM object has been blocked.
+                        * Let the slow path handle it.
+                        */
+                       break;
+               }
+
                m = vm_page_lookup(cur_object, cur_offset);
 
                if (m != VM_PAGE_NULL) {
@@ -2367,7 +3319,6 @@ RetryFault:
                                 * have object that 'm' belongs to locked exclusively
                                 */
                                if (object != cur_object) {
-                                       vm_object_unlock(object);
 
                                        if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
 
@@ -2376,11 +3327,13 @@ RetryFault:
                                                if (vm_object_lock_upgrade(cur_object) == FALSE) {
                                                        /*
                                                         * couldn't upgrade so go do a full retry
-                                                        * immediately since we've already dropped
-                                                        * the top object lock associated with this page
-                                                        * and the current one got dropped due to the
-                                                        * failed upgrade... the state is no longer valid
+                                                        * immediately since we can no longer be
+                                                        * certain about cur_object (since we
+                                                        * don't hold a reference on it)...
+                                                        * first drop the top object lock
                                                         */
+                                                       vm_object_unlock(object);
+
                                                        vm_map_unlock_read(map);
                                                        if (real_map != map)
                                                                vm_map_unlock(real_map);
@@ -2407,6 +3360,30 @@ RetryFault:
                                                continue;
                                        }
                                }
+                               if (m->pageout_queue && m->object->internal && COMPRESSED_PAGER_IS_ACTIVE) {
+                                       /*
+                                        * m->busy == TRUE and the object is locked exclusively
+                                        * if m->pageout_queue == TRUE after we acquire the
+                                        * queues lock, we are guaranteed that it is stable on
+                                        * the pageout queue and therefore reclaimable
+                                        *
+                                        * NOTE: this is only true for the internal pageout queue
+                                        * in the compressor world
+                                        */
+                                       vm_page_lock_queues();
+
+                                       if (m->pageout_queue) {
+                                               vm_pageout_throttle_up(m);
+                                               vm_page_unlock_queues();
+
+                                               PAGE_WAKEUP_DONE(m);
+                                               goto reclaimed_from_pageout;
+                                       }
+                                       vm_page_unlock_queues();
+                               }
+                               if (object != cur_object)
+                                       vm_object_unlock(object);
+
                                vm_map_unlock_read(map);
                                if (real_map != map)
                                        vm_map_unlock(real_map);
@@ -2426,6 +3403,46 @@ RetryFault:
                                kr = KERN_ABORTED;
                                goto done;
                        }
+reclaimed_from_pageout:
+                       if (m->laundry) {
+                               if (object != cur_object) {
+                                       if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+                                               cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                               vm_object_unlock(object);
+                                               vm_object_unlock(cur_object);
+
+                                               vm_map_unlock_read(map);
+                                               if (real_map != map)
+                                                       vm_map_unlock(real_map);
+
+                                               goto RetryFault;
+                                       }
+
+                               } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+                                       object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                       if (vm_object_lock_upgrade(object) == FALSE) {
+                                               /*
+                                                * couldn't upgrade, so explicitly take the lock
+                                                * exclusively and go relookup the page since we
+                                                * will have dropped the object lock and
+                                                * a different thread could have inserted
+                                                * a page at this offset
+                                                * no need for a full retry since we're
+                                                * at the top level of the object chain
+                                                */
+                                               vm_object_lock(object);
+
+                                               continue;
+                                       }
+                               }
+                               m->pageout = FALSE;
+                               
+                               vm_pageout_steal_laundry(m, FALSE);
+                       }
+
                        if (m->phys_page == vm_page_guard_addr) {
                                /*
                                 * Guard page: let the slow path deal with it
@@ -2438,6 +3455,17 @@ RetryFault:
                                 */
                                break;
                        }
+                       if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m->object)) {
+                               if (object != cur_object)
+                                       vm_object_unlock(object);
+                               vm_map_unlock_read(map);
+                               if (real_map != map)
+                                       vm_map_unlock(real_map);
+                               vm_object_unlock(cur_object);
+                               kr = KERN_MEMORY_ERROR;
+                               goto done;
+                       }
+
                        if (m->encrypted) {
                                /*
                                 * ENCRYPTED SWAP:
@@ -2512,8 +3540,35 @@ RetryFault:
                        }
                        ASSERT_PAGE_DECRYPTED(m);
 
-                       if (m->object->code_signed && map != kernel_map &&
-                           (!m->cs_validated || m->wpmapped)) {
+                       if(vm_page_is_slideable(m)) {
+                               /*
+                                * We might need to slide this page, and so
+                                * we want to hold the VM object exclusively.
+                                */
+                               if (object != cur_object) {
+                                       if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+                                               vm_object_unlock(object);
+                                               vm_object_unlock(cur_object);
+
+                                               cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                               vm_map_unlock_read(map);
+                                               if (real_map != map)
+                                                       vm_map_unlock(real_map);
+
+                                               goto RetryFault;
+                                       }
+                               } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+                                       vm_object_unlock(object);
+                                       object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+                                       vm_map_unlock_read(map);
+                                       goto RetryFault;
+                               }
+                       }
+
+                       if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m)) {
+upgrade_for_validation:
                                /*
                                 * We might need to validate this page
                                 * against its code signature, so we
@@ -2561,37 +3616,32 @@ RetryFault:
                         */
 
                        if (object == cur_object && object->copy == VM_OBJECT_NULL) {
-                               if ((fault_type & VM_PROT_WRITE) == 0) {
-                                       /*
-                                        * This is not a "write" fault, so we
-                                        * might not have taken the object lock
-                                        * exclusively and we might not be able
-                                        * to update the "wpmapped" bit in
-                                        * vm_fault_enter().
-                                        * Let's just grant read access to
-                                        * the page for now and we'll
-                                        * soft-fault again if we need write
-                                        * access later...
-                                        */
-                                       prot &= ~VM_PROT_WRITE;
-                               }
+
                                goto FastPmapEnter;
                        }
 
                        if ((fault_type & VM_PROT_WRITE) == 0) {
 
-                               prot &= ~VM_PROT_WRITE;
-
-                               /*
-                                * Set up to map the page...
-                                * mark the page busy, drop
-                                * unneeded object lock
-                                */     
                                if (object != cur_object) {
-                                       /*      
-                                        * don't need the original object anymore
+                                       /*
+                                        * We still need to hold the top object
+                                        * lock here to prevent a race between
+                                        * a read fault (taking only "shared"
+                                        * locks) and a write fault (taking
+                                        * an "exclusive" lock on the top
+                                        * object).
+                                        * Otherwise, as soon as we release the
+                                        * top lock, the write fault could
+                                        * proceed and actually complete before
+                                        * the read fault, and the copied page's
+                                        * translation could then be overwritten
+                                        * by the read fault's translation for
+                                        * the original page.
+                                        *
+                                        * Let's just record what the top object
+                                        * is and we'll release it later.
                                         */
-                                       vm_object_unlock(object);
+                                       top_object = object;
 
                                        /*
                                         * switch to the object that has the new page
@@ -2608,34 +3658,56 @@ FastPmapEnter:
                                 * cur_object == NULL or it's been unlocked
                                 * no paging references on either object or cur_object
                                 */
-#if    MACH_KDB
-                               if (db_watchpoint_list && (fault_type & VM_PROT_WRITE) == 0)
-                                       prot &= ~VM_PROT_WRITE;
-#endif
+                               if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE)
+                                       need_retry_ptr = &need_retry;
+                               else
+                                       need_retry_ptr = NULL;
+
                                if (caller_pmap) {
                                        kr = vm_fault_enter(m,
                                                            caller_pmap,
                                                            caller_pmap_addr,
                                                            prot,
+                                                           fault_type,
                                                            wired,
                                                            change_wiring,
                                                            fault_info.no_cache,
+                                                           fault_info.cs_bypass,
+                                                           need_retry_ptr,
                                                            &type_of_fault);
                                } else {
                                        kr = vm_fault_enter(m,
                                                            pmap,
                                                            vaddr,
                                                            prot,
+                                                           fault_type,
                                                            wired,
                                                            change_wiring,
                                                            fault_info.no_cache,
+                                                           fault_info.cs_bypass,
+                                                           need_retry_ptr,
                                                            &type_of_fault);
                                }
 
+                               if (top_object != VM_OBJECT_NULL) {
+                                       /*
+                                        * It's safe to drop the top object
+                                        * now that we've done our
+                                        * vm_fault_enter().  Any other fault
+                                        * in progress for that virtual
+                                        * address will either find our page
+                                        * and translation or put in a new page
+                                        * and translation.
+                                        */
+                                       vm_object_unlock(top_object);
+                                       top_object = VM_OBJECT_NULL;
+                               }
+
                                if (need_collapse == TRUE)
                                        vm_object_collapse(object, offset, TRUE);
-
-                               if (type_of_fault == DBG_PAGEIN_FAULT) {
+                               
+                               if (need_retry == FALSE &&
+                                   (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
                                        /*
                                         * evaluate access pattern and update state
                                         * vm_fault_deactivate_behind depends on the
@@ -2657,11 +3729,53 @@ FastPmapEnter:
                                if (real_map != map)
                                        vm_map_unlock(real_map);
 
+                               if (need_retry == TRUE) {
+                                       /*
+                                        * vm_fault_enter couldn't complete the PMAP_ENTER...
+                                        * at this point we don't hold any locks so it's safe
+                                        * to ask the pmap layer to expand the page table to
+                                        * accommodate this mapping... once expanded, we'll
+                                        * re-drive the fault, which should result in vm_fault_enter
+                                        * being able to successfully enter the mapping this time around
+                                        */
+                                       (void)pmap_enter_options(pmap, vaddr, 0, 0, 0, 0, 0, PMAP_OPTIONS_NOENTER, NULL);
+                                       
+                                       need_retry = FALSE;
+                                       goto RetryFault;
+                               }
                                goto done;
                        }
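The need_retry handling above relies on pmap_enter_options() with PMAP_OPTIONS_NOENTER to pre-expand the page table while no locks are held, after vm_fault_enter() declined to block. The shape of that protocol, as a hedged analogue with an invented table type (try_insert stands in for vm_fault_enter, table_expand for the NOENTER call):

        #include <stdbool.h>
        #include <stdlib.h>

        typedef struct {
                int     *slots;
                size_t  cap;
                size_t  used;
        } table_t;

        /* Analogue of vm_fault_enter(): must not allocate, so it asks
         * the caller to retry when there is no room. */
        static bool
        try_insert(table_t *t, int v, bool *need_retry)
        {
                if (t->used == t->cap) {
                        *need_retry = true;
                        return false;
                }
                t->slots[t->used++] = v;
                *need_retry = false;
                return true;
        }

        /* Analogue of pmap_enter_options(..., PMAP_OPTIONS_NOENTER, ...):
         * called with no locks held, it only grows the structure. */
        static bool
        table_expand(table_t *t)
        {
                size_t ncap = t->cap ? t->cap * 2 : 16;
                int *n = realloc(t->slots, ncap * sizeof(*n));

                if (n == NULL)
                        return false;
                t->slots = n;
                t->cap = ncap;
                return true;
        }

The caller's loop mirrors the fault path: when need_retry comes back true it drops every lock, calls table_expand(), and re-drives the insert, just as the fault handler issues the NOENTER call and goes back to RetryFault.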
                        /*
                         * COPY ON WRITE FAULT
-                        *
+                        */
+                       assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
+
+                       if ((throttle_delay = vm_page_throttled())) {
+                               /*
+                                * drop all of our locks...
+                                * wait until the free queue is
+                                * pumped back up and then
+                                * redrive the fault
+                                */
+                               if (object != cur_object)
+                                       vm_object_unlock(cur_object);
+                               vm_object_unlock(object);
+                               vm_map_unlock_read(map);
+                               if (real_map != map)
+                                       vm_map_unlock(real_map);
+
+                               VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+
+                               delay(throttle_delay);
+
+                               if (!current_thread_aborted() && vm_page_wait((change_wiring) ? 
+                                                THREAD_UNINT :
+                                                THREAD_ABORTSAFE))
+                                       goto RetryFault;
+                               kr = KERN_ABORTED;
+                               goto done;
+                       }
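This throttle block never sleeps while holding VM locks: everything is dropped first, then the thread delays and re-drives the whole fault. A compact sketch of that discipline, with invented helpers (throttle_usecs and page_shortage stand in for vm_page_throttled() and the vm_page_wait() condition):

        #include <stdbool.h>
        #include <unistd.h>

        extern unsigned throttle_usecs(void);
        extern bool page_shortage(void);

        int
        fault_with_throttle(void (*drop_locks)(void), int (*redrive)(void))
        {
                unsigned t;

                while ((t = throttle_usecs()) != 0) {
                        drop_locks();   /* never sleep holding map/object locks */
                        usleep(t);      /* analogue of delay(throttle_delay) */
                        if (!page_shortage())
                                return redrive();       /* goto RetryFault */
                }
                return 0;               /* not throttled: proceed */
        }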
+                        /*
                         * If objects match, then
                         * object->copy must not be NULL (else control
                         * would be in previous code block), and we
@@ -2675,13 +3789,19 @@ FastPmapEnter:
                                 */
                                break;
                        }
-                       assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
-
+                       
                        /*
                         * This is now a shadow based copy on write
                         * fault -- it requires a copy up the shadow
                         * chain.
-                        *
+                        */
+                       
+                       if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
+                           VM_FAULT_NEED_CS_VALIDATION(NULL, m)) {
+                               goto upgrade_for_validation;
+                       }
+
+                       /*
                         * Allocate a page in the original top level
                         * object. Give up if allocate fails.  Also
                         * need to remember current page, as it's the
@@ -2711,7 +3831,7 @@ FastPmapEnter:
                         */
                        vm_page_copy(cur_m, m);
                        vm_page_insert(m, object, offset);
-                       m->dirty = TRUE;
+                       SET_PAGE_DIRTY(m, FALSE);
 
                        /*
                         * Now cope with the source page and object
@@ -2764,12 +3884,148 @@ FastPmapEnter:
                         * No page at cur_object, cur_offset... m == NULL
                         */
                        if (cur_object->pager_created) {
-                               if (MUST_ASK_PAGER(cur_object, cur_offset) == TRUE) {
+                               int     compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
+
+                               if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
+                                       int             my_fault_type;
+                                       int             c_flags = C_DONT_BLOCK;
+                                       boolean_t       insert_cur_object = FALSE;
+
                                        /*
                                         * May have to talk to a pager...
-                                        * take the slow path.
+                                        * if so, take the slow path by
+                                        * doing a 'break' from the while (TRUE) loop
+                                        *
+                                        * compressor_external_state will only be set to VM_EXTERNAL_STATE_EXISTS
+                                        * if the compressor is active and the page exists there
                                         */
-                                       break;
+                                       if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS)
+                                               break;
+
+                                       if (map == kernel_map || real_map == kernel_map) {
+                                               /*
+                                                * can't call into the compressor with the kernel_map
+                                                * lock held, since the compressor may try to operate
+                                                * on the kernel map in order to return an empty c_segment
+                                                */
+                                               break;
+                                       }
+                                       if (object != cur_object) {
+                                               if (fault_type & VM_PROT_WRITE)
+                                                       c_flags |= C_KEEP;
+                                               else
+                                                       insert_cur_object = TRUE;
+                                       }
+                                       if (insert_cur_object == TRUE) {
+
+                                               if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+
+                                                       cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                                       if (vm_object_lock_upgrade(cur_object) == FALSE) {
+                                                               /*
+                                                                * couldn't upgrade so go do a full retry
+                                                                * immediately since we can no longer be
+                                                                * certain about cur_object (since we
+                                                                * don't hold a reference on it)...
+                                                                * first drop the top object lock
+                                                                */
+                                                               vm_object_unlock(object);
+
+                                                               vm_map_unlock_read(map);
+                                                               if (real_map != map)
+                                                                       vm_map_unlock(real_map);
+
+                                                               goto RetryFault;
+                                                       }
+                                               }
+                                       } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+                                               object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                               if (object != cur_object) {
+                                                       /*
+                                                        * we can't go for the upgrade on the top
+                                                        * lock since the upgrade may block waiting
+                                                        * for readers to drain... since we hold
+                                                        * cur_object locked at this point, waiting
+                                                        * for the readers to drain would represent
+                                                        * a lock order inversion since the lock order
+                                                        * for objects is the reference order in the
+                                                        * shadow chain
+                                                        */
+                                                       vm_object_unlock(object);
+                                                       vm_object_unlock(cur_object);
+
+                                                       vm_map_unlock_read(map);
+                                                       if (real_map != map)
+                                                               vm_map_unlock(real_map);
+
+                                                       goto RetryFault;
+                                               }
+                                               if (vm_object_lock_upgrade(object) == FALSE) {
+                                                       /*
+                                                        * couldn't upgrade, so explicitly take the lock
+                                                        * exclusively and go relookup the page since we
+                                                        * will have dropped the object lock and
+                                                        * a different thread could have inserted
+                                                        * a page at this offset...
+                                                        * no need for a full retry since we're
+                                                        * at the top level of the object chain
+                                                        */
+                                                       vm_object_lock(object);
+                                                       
+                                                       continue;
+                                               }
+                                       }
+                                       m = vm_page_grab();
+
+                                       if (m == VM_PAGE_NULL) {
+                                               /*
+                                                * no free page currently available...
+                                                * must take the slow path
+                                                */
+                                               break;
+                                       }
+                                       if (vm_compressor_pager_get(cur_object->pager, cur_offset + cur_object->paging_offset,
+                                                                   m->phys_page, &my_fault_type, c_flags) != KERN_SUCCESS) {
+                                               vm_page_release(m);
+                                               break;
+                                       }
+                                       m->dirty = TRUE;
+
+                                       if (insert_cur_object)
+                                               vm_page_insert(m, cur_object, cur_offset);
+                                       else
+                                               vm_page_insert(m, object, offset);
+
+                                       if ((m->object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
+                                                /*
+                                                * If the page is not cacheable,
+                                                * we can't let its contents
+                                                * linger in the data cache
+                                                * after the decompression.
+                                                */
+                                               pmap_sync_page_attributes_phys(m->phys_page);
+                                       }
+                                       type_of_fault = my_fault_type;
+
+                                       VM_STAT_INCR(decompressions);
+
+                                       if (cur_object != object) {
+                                               if (insert_cur_object) {
+                                                       top_object = object;
+                                                       /*
+                                                        * switch to the object that has the new page
+                                                        */
+                                                       object = cur_object;
+                                                       object_lock_type = cur_object_lock_type;
+                                               } else {
+                                                       vm_object_unlock(cur_object);
+                                                       cur_object = object;
+                                               }
+                                       }
+                                       goto FastPmapEnter;
                                }
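The whole block above is a fast path: grab a free page without blocking, ask the compressor to decompress directly into it with C_DONT_BLOCK, and fall back to the slow path on any failure. A hedged user-space analogue (cbuf_get_nonblock is invented; it stands in for vm_compressor_pager_get):

        #include <stdbool.h>
        #include <stdlib.h>

        extern bool cbuf_get_nonblock(unsigned key, void *dst, size_t len);

        void *
        fast_fill(unsigned key, size_t len, bool *take_slow_path)
        {
                void *page = malloc(len);       /* analogue of vm_page_grab() */

                if (page == NULL) {
                        *take_slow_path = true; /* no free page right now */
                        return NULL;
                }
                if (!cbuf_get_nonblock(key, page, len)) {
                        free(page);             /* analogue of vm_page_release() */
                        *take_slow_path = true;
                        return NULL;
                }
                *take_slow_path = false;
                return page;                    /* caller marks it dirty */
        }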
                                /*
                                 * existence map present and indicates
@@ -2781,8 +4037,9 @@ FastPmapEnter:
                                 * Zero fill fault.  Page gets
                                 * inserted into the original object.
                                 */
-                               if (cur_object->shadow_severed) {
-
+                               if (cur_object->shadow_severed ||
+                                   VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object))
+                               {
                                        if (object != cur_object)
                                                vm_object_unlock(cur_object);
                                        vm_object_unlock(object);
@@ -2794,7 +4051,7 @@ FastPmapEnter:
                                        kr = KERN_MEMORY_ERROR;
                                        goto done;
                                }
-                               if (VM_PAGE_ZFILL_THROTTLED()) {
+                               if ((throttle_delay = vm_page_throttled())) {
                                        /*
                                         * drop all of our locks...
                                         * wait until the free queue is
@@ -2808,11 +4065,14 @@ FastPmapEnter:
                                        if (real_map != map)
                                                vm_map_unlock(real_map);
 
-                                       if (vm_page_wait((change_wiring) ? 
+                                       VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+
+                                       delay(throttle_delay);
+
+                                       if (!current_thread_aborted() && vm_page_wait((change_wiring) ? 
                                                         THREAD_UNINT :
                                                         THREAD_ABORTSAFE))
                                                goto RetryFault;
-
                                        kr = KERN_ABORTED;
                                        goto done;
                                }
@@ -2875,7 +4135,7 @@ FastPmapEnter:
                        /*
                         * On to the next level in the shadow chain
                         */
-                       cur_offset += cur_object->shadow_offset;
+                       cur_offset += cur_object->vo_shadow_offset;
                        new_object = cur_object->shadow;
 
                        /*
@@ -2938,8 +4198,10 @@ handle_copy_delay:
 
        error_code = 0;
 
+       result_page = VM_PAGE_NULL;
        kr = vm_fault_page(object, offset, fault_type,
                           (change_wiring && !wired),
+                          FALSE, /* page not looked up */
                           &prot, &result_page, &top_page,
                           &type_of_fault,
                           &error_code, map->no_zero_fill,
@@ -2953,14 +4215,14 @@ handle_copy_delay:
         * if kr == VM_FAULT_SUCCESS, then the paging reference
         * is still held along with the ref_count on the original object
         *
-        *      if m != NULL, then the object it belongs to 
-        *      is returned locked with a paging reference
+        *      the object is returned locked with a paging reference
         *
         *      if top_page != NULL, then it's BUSY and the 
         *      object it belongs to has a paging reference
         *      but is returned unlocked
         */
-       if (kr != VM_FAULT_SUCCESS) {
+       if (kr != VM_FAULT_SUCCESS &&
+           kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
                /*
                 * we didn't succeed, lose the object reference immediately.
                 */
@@ -2989,6 +4251,9 @@ handle_copy_delay:
                        else
                                kr = KERN_MEMORY_ERROR;
                        goto done;
+               default:
+                       panic("vm_fault: unexpected error 0x%x from "
+                             "vm_fault_page()\n", kr);
                }
        }
        m = result_page;
@@ -3006,10 +4271,12 @@ handle_copy_delay:
 #define RELEASE_PAGE(m)                                        \
        MACRO_BEGIN                                     \
        PAGE_WAKEUP_DONE(m);                            \
-       vm_page_lockspin_queues();                      \
-       if (!m->active && !m->inactive && !m->throttled)\
-               vm_page_activate(m);                    \
-       vm_page_unlock_queues();                        \
+       if (!m->active && !m->inactive && !m->throttled) {              \
+               vm_page_lockspin_queues();                              \
+               if (!m->active && !m->inactive && !m->throttled)        \
+                       vm_page_activate(m);                            \
+               vm_page_unlock_queues();                                \
+       }                                                               \
        MACRO_END
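The rewritten RELEASE_PAGE macro is classic double-checked locking: test the page-queue flags without the lock to skip the common case, then re-test under vm_page_lockspin_queues() before acting, since the unlocked read may be stale. A minimal sketch of the pattern with pthreads (pagelike_t is a stand-in):

        #include <pthread.h>
        #include <stdbool.h>

        typedef struct {
                pthread_mutex_t queues_lock;
                bool            on_a_queue;     /* racy when read unlocked */
        } pagelike_t;

        void
        activate_if_needed(pagelike_t *p)
        {
                if (!p->on_a_queue) {           /* cheap, possibly stale check */
                        pthread_mutex_lock(&p->queues_lock);
                        if (!p->on_a_queue)     /* re-check under the lock */
                                p->on_a_queue = true;   /* vm_page_activate() */
                        pthread_mutex_unlock(&p->queues_lock);
                }
        }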
 
        /*
@@ -3019,8 +4286,10 @@ handle_copy_delay:
        if (m != VM_PAGE_NULL) {
                old_copy_object = m->object->copy;
                vm_object_unlock(m->object);
-       } else
+       } else {
                old_copy_object = VM_OBJECT_NULL;
+               vm_object_unlock(object);
+       }
 
        /*
         * no object locks are held at this point
@@ -3167,18 +4436,24 @@ handle_copy_delay:
                                            caller_pmap,
                                            caller_pmap_addr,
                                            prot,
+                                           fault_type,
                                            wired,
                                            change_wiring,
                                            fault_info.no_cache,
+                                           fault_info.cs_bypass,
+                                           NULL,
                                            &type_of_fault);
                } else {
                        kr = vm_fault_enter(m,
                                            pmap,
                                            vaddr,
                                            prot,
+                                           fault_type,
                                            wired,
                                            change_wiring,
                                            fault_info.no_cache,
+                                           fault_info.cs_bypass,
+                                           NULL,
                                            &type_of_fault);
                }
                if (kr != KERN_SUCCESS) {
@@ -3209,7 +4484,7 @@ handle_copy_delay:
                /* to execute, we return with a protection failure.      */
 
                if ((fault_type & VM_PROT_EXECUTE) &&
-                       (!pmap_eligible_for_execute((ppnum_t)(object->shadow_offset >> 12)))) {
+                       (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) {
 
                        vm_map_verify_done(map, &version);
 
@@ -3266,26 +4541,29 @@ handle_copy_delay:
                                        (entry->object.vm_object != NULL) &&
                                        (entry->object.vm_object == object)) {
 
+                       int superpage = (!object->pager_created && object->phys_contiguous) ? VM_MEM_SUPERPAGE : 0;
                        if (caller_pmap) {
                                /*
                                 * Set up a block mapped area
                                 */
+                               assert((uint32_t)((ldelta + hdelta) >> 12) == ((ldelta + hdelta) >> 12));
                                pmap_map_block(caller_pmap, 
                                               (addr64_t)(caller_pmap_addr - ldelta), 
-                                              (((vm_map_offset_t) (entry->object.vm_object->shadow_offset)) +
-                                               entry->offset + (laddr - entry->vme_start) - ldelta) >> 12,
-                                              ((ldelta + hdelta) >> 12), prot, 
-                                              (VM_WIMG_MASK & (int)object->wimg_bits), 0);
+                                              (ppnum_t)((((vm_map_offset_t) (entry->object.vm_object->vo_shadow_offset)) +
+                                                         entry->offset + (laddr - entry->vme_start) - ldelta) >> 12),
+                                              (uint32_t)((ldelta + hdelta) >> 12), prot, 
+                                              (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
                        } else { 
                                /*
                                 * Set up a block mapped area
                                 */
+                               assert((uint32_t)((ldelta + hdelta) >> 12) == ((ldelta + hdelta) >> 12));
                                pmap_map_block(real_map->pmap, 
                                               (addr64_t)(vaddr - ldelta), 
-                                              (((vm_map_offset_t)(entry->object.vm_object->shadow_offset)) +
-                                               entry->offset + (laddr - entry->vme_start) - ldelta) >> 12,
-                                              ((ldelta + hdelta) >> 12), prot, 
-                                              (VM_WIMG_MASK & (int)object->wimg_bits), 0);
+                                              (ppnum_t)((((vm_map_offset_t)(entry->object.vm_object->vo_shadow_offset)) +
+                                                         entry->offset + (laddr - entry->vme_start) - ldelta) >> 12),
+                                              (uint32_t)((ldelta + hdelta) >> 12), prot, 
+                                              (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
                        }
                }
        }
@@ -3312,9 +4590,17 @@ handle_copy_delay:
 done:
        thread_interrupt_level(interruptible_state);
 
-       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
-                             (int)((uint64_t)vaddr >> 32),
-                             (int)vaddr,
+       /*
+        * Only throttle on faults that cause a pagein.
+        */
+       if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
+               throttle_lowpri_io(1);
+       }
+
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+                             (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
+                             ((uint64_t)vaddr >> 32),
+                             vaddr,
                              kr,
                              type_of_fault,
                              0);
@@ -3423,6 +4709,11 @@ vm_fault_unwire(
        fault_info.lo_offset = entry->offset;
        fault_info.hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
        fault_info.no_cache = entry->no_cache;
+       fault_info.stealth = TRUE;
+       fault_info.io_sync = FALSE;
+       fault_info.cs_bypass = FALSE;
+       fault_info.mark_zf_absent = FALSE;
+       fault_info.batch_pmap_op = FALSE;
 
        /*
         *      Since the pages are wired down, we must be able to
@@ -3431,11 +4722,11 @@ vm_fault_unwire(
 
        for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
 
-               if (pmap) {
-                       pmap_change_wiring(pmap, 
-                                          pmap_addr + (va - entry->vme_start), FALSE);
-               }
                if (object == VM_OBJECT_NULL) {
+                       if (pmap) {
+                               pmap_change_wiring(pmap, 
+                                                  pmap_addr + (va - entry->vme_start), FALSE);
+                       }
                        (void) vm_fault(map, va, VM_PROT_NONE, 
                                        TRUE, THREAD_UNINT, pmap, pmap_addr);
                } else {
@@ -3445,7 +4736,13 @@ vm_fault_unwire(
                        vm_object_t     result_object;
                        vm_fault_return_t result;
 
-                       fault_info.cluster_size = end_addr - va;
+                       if (end_addr - va > (vm_size_t) -1) {
+                               /* 32-bit overflow */
+                               fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
+                       } else {
+                               fault_info.cluster_size = (vm_size_t) (end_addr - va);
+                               assert(fault_info.cluster_size == end_addr - va);
+                       }
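The clamp above narrows a potentially 64-bit extent into the 32-bit cluster_size field: if the value would overflow, it substitutes the largest page-aligned value instead of silently truncating, and otherwise asserts that the narrowing round-trips. The same pattern in isolation, assuming a 4 KB page size purely for illustration:

        #include <assert.h>
        #include <stdint.h>

        #define PAGE_SZ 4096u   /* assumed page size, for illustration */

        uint32_t
        clamp_cluster(uint64_t extent)
        {
                if (extent > (uint32_t) -1)              /* would not fit */
                        return (uint32_t) (0 - PAGE_SZ); /* largest page-aligned value */
                assert((uint32_t) extent == extent);     /* narrowing round-trips */
                return (uint32_t) extent;
        }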
 
                        do {
                                prot = VM_PROT_NONE;
@@ -3455,10 +4752,12 @@ vm_fault_unwire(
                                XPR(XPR_VM_FAULT,
                                        "vm_fault_unwire -> vm_fault_page\n",
                                        0,0,0,0,0);
+                               result_page = VM_PAGE_NULL;
                                result = vm_fault_page(
                                        object,
                                        entry->offset + (va - entry->vme_start),
                                        VM_PROT_NONE, TRUE,
+                                       FALSE, /* page not looked up */
                                        &prot, &result_page, &top_page,
                                        (int *)0,
                                        NULL, map->no_zero_fill, 
@@ -3478,6 +4777,18 @@ vm_fault_unwire(
                        if (result == VM_FAULT_MEMORY_ERROR && !object->alive)
                                continue;
 
+                       if (result == VM_FAULT_MEMORY_ERROR &&
+                           object == kernel_object) {
+                               /*
+                                * This must have been allocated with
+                                * KMA_KOBJECT and KMA_VAONLY and there's
+                                * no physical page at this offset.
+                                * We're done (no page to free).
+                                */
+                               assert(deallocate);
+                               continue;
+                       }
+
                        if (result != VM_FAULT_SUCCESS)
                                panic("vm_fault_unwire: failure");
 
@@ -3489,9 +4800,21 @@ vm_fault_unwire(
                                pmap_disconnect(result_page->phys_page);
                                VM_PAGE_FREE(result_page);
                        } else {
-                               vm_page_lockspin_queues();
-                               vm_page_unwire(result_page);
-                               vm_page_unlock_queues();
+                               if ((pmap) && (result_page->phys_page != vm_page_guard_addr))
+                                       pmap_change_wiring(pmap, 
+                                           pmap_addr + (va - entry->vme_start), FALSE);
+
+
+                               if (VM_PAGE_WIRED(result_page)) {
+                                       vm_page_lockspin_queues();
+                                       vm_page_unwire(result_page, TRUE);
+                                       vm_page_unlock_queues();
+                               }
+                               if (entry->zero_wired_pages) {
+                                       pmap_zero_page(result_page->phys_page);
+                                       entry->zero_wired_pages = FALSE;
+                               }
+
                                PAGE_WAKEUP_DONE(result_page);
                        }
                        vm_fault_cleanup(result_object, top_page);
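The zero_wired_pages handling above scrubs the frame as it is unwired, honoring mappings that promise their contents do not survive unwiring. A hedged analogue of that one-shot scrub (frame_t is invented):

        #include <stdbool.h>
        #include <string.h>

        typedef struct {
                unsigned char   data[4096];
                bool            zero_on_unwire;
        } frame_t;

        void
        unwire_frame(frame_t *f)
        {
                if (f->zero_on_unwire) {
                        memset(f->data, 0, sizeof f->data); /* pmap_zero_page() */
                        f->zero_on_unwire = false;          /* one-shot, like the map entry flag */
                }
        }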
@@ -3558,7 +4881,7 @@ vm_fault_wire_fast(
 #define RELEASE_PAGE(m)        {                               \
        PAGE_WAKEUP_DONE(m);                            \
        vm_page_lockspin_queues();                      \
-       vm_page_unwire(m);                              \
+       vm_page_unwire(m, TRUE);                        \
        vm_page_unlock_queues();                        \
 }
 
@@ -3677,9 +5000,12 @@ vm_fault_wire_fast(
                            pmap,
                            pmap_addr,
                            prot,
+                           prot,
                            TRUE,
                            FALSE,
                            FALSE,
+                           FALSE,
+                           NULL,
                            &type_of_fault);
 
 done:
@@ -3709,10 +5035,12 @@ vm_fault_copy_cleanup(
 
        vm_object_lock(object);
        PAGE_WAKEUP_DONE(page);
-       vm_page_lockspin_queues();
-       if (!page->active && !page->inactive && !page->throttled)
-               vm_page_activate(page);
-       vm_page_unlock_queues();
+       if (!page->active && !page->inactive && !page->throttled) {
+               vm_page_lockspin_queues();
+               if (!page->active && !page->inactive && !page->throttled)
+                       vm_page_activate(page);
+               vm_page_unlock_queues();
+       }
        vm_fault_cleanup(object, top_page);
 }
 
@@ -3726,7 +5054,7 @@ vm_fault_copy_dst_cleanup(
                object = page->object;
                vm_object_lock(object);
                vm_page_lockspin_queues();
-               vm_page_unwire(page);
+               vm_page_unwire(page, TRUE);
                vm_page_unlock_queues();
                vm_object_paging_end(object);   
                vm_object_unlock(object);
@@ -3784,6 +5112,7 @@ vm_fault_copy(
        vm_map_size_t           amount_left;
        vm_object_t             old_copy_object;
        kern_return_t           error = 0;
+       vm_fault_return_t       result;
 
        vm_map_size_t           part_size;
        struct vm_object_fault_info fault_info_src;
@@ -3808,6 +5137,11 @@ vm_fault_copy(
        fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
        fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
        fault_info_src.no_cache   = FALSE;
+       fault_info_src.stealth = TRUE;
+       fault_info_src.io_sync = FALSE;
+       fault_info_src.cs_bypass = FALSE;
+       fault_info_src.mark_zf_absent = FALSE;
+       fault_info_src.batch_pmap_op = FALSE;
 
        fault_info_dst.interruptible = interruptible;
        fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
@@ -3815,6 +5149,11 @@ vm_fault_copy(
        fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
        fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
        fault_info_dst.no_cache   = FALSE;
+       fault_info_dst.stealth = TRUE;
+       fault_info_dst.io_sync = FALSE;
+       fault_info_dst.cs_bypass = FALSE;
+       fault_info_dst.mark_zf_absent = FALSE;
+       fault_info_dst.batch_pmap_op = FALSE;
 
        do { /* while (amount_left > 0) */
                /*
@@ -3831,18 +5170,27 @@ vm_fault_copy(
                vm_object_lock(dst_object);
                vm_object_paging_begin(dst_object);
 
-               fault_info_dst.cluster_size = amount_left;
+               if (amount_left > (vm_size_t) -1) {
+                       /* 32-bit overflow */
+                       fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
+               } else {
+                       fault_info_dst.cluster_size = (vm_size_t) amount_left;
+                       assert(fault_info_dst.cluster_size == amount_left);
+               }
 
                XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
-               switch (vm_fault_page(dst_object,
-                                     vm_object_trunc_page(dst_offset),
-                                     VM_PROT_WRITE|VM_PROT_READ,
-                                     FALSE,
-                                     &dst_prot, &dst_page, &dst_top_page,
-                                     (int *)0,
-                                     &error,
-                                     dst_map->no_zero_fill,
-                                     FALSE, &fault_info_dst)) {
+               dst_page = VM_PAGE_NULL;
+               result = vm_fault_page(dst_object,
+                                      vm_object_trunc_page(dst_offset),
+                                      VM_PROT_WRITE|VM_PROT_READ,
+                                      FALSE,
+                                      FALSE, /* page not looked up */
+                                      &dst_prot, &dst_page, &dst_top_page,
+                                      (int *)0,
+                                      &error,
+                                      dst_map->no_zero_fill,
+                                      FALSE, &fault_info_dst);
+               switch (result) {
                case VM_FAULT_SUCCESS:
                        break;
                case VM_FAULT_RETRY:
@@ -3853,11 +5201,19 @@ vm_fault_copy(
                        /* fall thru */
                case VM_FAULT_INTERRUPTED:
                        RETURN(MACH_SEND_INTERRUPTED);
+               case VM_FAULT_SUCCESS_NO_VM_PAGE:
+                       /* success but no VM page: fail the copy */
+                       vm_object_paging_end(dst_object);
+                       vm_object_unlock(dst_object);
+                       /*FALLTHROUGH*/
                case VM_FAULT_MEMORY_ERROR:
                        if (error)
                                return (error);
                        else
                                return(KERN_MEMORY_ERROR);
+               default:
+                       panic("vm_fault_copy: unexpected error 0x%x from "
+                             "vm_fault_page()\n", result);
                }
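The new VM_FAULT_SUCCESS_NO_VM_PAGE case uses a deliberate fallthrough: it performs the cleanup unique to that case and then shares the MEMORY_ERROR return path. The control-flow shape in isolation (all names invented; only the shape matters):

        enum fault_result { FR_OK, FR_OK_NO_PAGE, FR_MEM_ERROR };

        int
        handle_result(enum fault_result r, void (*end_paging)(void), int error)
        {
                switch (r) {
                case FR_OK:
                        return 0;
                case FR_OK_NO_PAGE:
                        end_paging();           /* cleanup unique to this case */
                        /*FALLTHROUGH*/
                case FR_MEM_ERROR:
                        return error ? error : -1;      /* default memory error */
                }
                return -2;                      /* analogue of the panic() default */
        }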
                assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
 
@@ -3908,20 +5264,29 @@ vm_fault_copy(
                                src_prot = VM_PROT_READ;
                                vm_object_paging_begin(src_object);
 
-                               fault_info_src.cluster_size = amount_left;
+                               if (amount_left > (vm_size_t) -1) {
+                                       /* 32-bit overflow */
+                                       fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
+                               } else {
+                                       fault_info_src.cluster_size = (vm_size_t) amount_left;
+                                       assert(fault_info_src.cluster_size == amount_left);
+                               }
 
                                XPR(XPR_VM_FAULT,
                                        "vm_fault_copy(2) -> vm_fault_page\n",
                                        0,0,0,0,0);
-                               switch (vm_fault_page(
-                                               src_object, 
-                                               vm_object_trunc_page(src_offset),
-                                               VM_PROT_READ, FALSE,
-                                               &src_prot, 
-                                               &result_page, &src_top_page,
-                                               (int *)0, &error, FALSE,
-                                               FALSE, &fault_info_src)) {
-
+                               result_page = VM_PAGE_NULL;
+                               result = vm_fault_page(
+                                       src_object, 
+                                       vm_object_trunc_page(src_offset),
+                                       VM_PROT_READ, FALSE,
+                                       FALSE, /* page not looked up */
+                                       &src_prot, 
+                                       &result_page, &src_top_page,
+                                       (int *)0, &error, FALSE,
+                                       FALSE, &fault_info_src);
+
+                               switch (result) {
                                case VM_FAULT_SUCCESS:
                                        break;
                                case VM_FAULT_RETRY:
@@ -3933,12 +5298,21 @@ vm_fault_copy(
                                case VM_FAULT_INTERRUPTED:
                                        vm_fault_copy_dst_cleanup(dst_page);
                                        RETURN(MACH_SEND_INTERRUPTED);
+                               case VM_FAULT_SUCCESS_NO_VM_PAGE:
+                                       /* success but no VM page: fail */
+                                       vm_object_paging_end(src_object);
+                                       vm_object_unlock(src_object);
+                                       /*FALLTHROUGH*/
                                case VM_FAULT_MEMORY_ERROR:
                                        vm_fault_copy_dst_cleanup(dst_page);
                                        if (error)
                                                return (error);
                                        else
                                                return(KERN_MEMORY_ERROR);
+                               default:
+                                       panic("vm_fault_copy(2): unexpected "
+                                             "error 0x%x from "
+                                             "vm_fault_page()\n", result);
                                }
 
 
@@ -3993,14 +5367,23 @@ vm_fault_copy(
                        }
 
                        if (result_page == VM_PAGE_NULL) {
+                               assert((vm_offset_t) dst_po == dst_po);
+                               assert((vm_size_t) part_size == part_size);
                                vm_page_part_zero_fill(dst_page,
-                                                       dst_po, part_size);
+                                                      (vm_offset_t) dst_po,
+                                                      (vm_size_t) part_size);
                        } else {
-                               vm_page_part_copy(result_page, src_po,
-                                       dst_page, dst_po, part_size);
+                               assert((vm_offset_t) src_po == src_po);
+                               assert((vm_offset_t) dst_po == dst_po);
+                               assert((vm_size_t) part_size == part_size);
+                               vm_page_part_copy(result_page,
+                                                 (vm_offset_t) src_po,
+                                                 dst_page,
+                                                 (vm_offset_t) dst_po,
+                                                 (vm_size_t)part_size);
                if (!dst_page->dirty) {
                                        vm_object_lock(dst_object);
-                                       dst_page->dirty = TRUE;
+                                       SET_PAGE_DIRTY(dst_page, TRUE);
                                        vm_object_unlock(dst_page->object);
                                }
 
@@ -4011,10 +5394,13 @@ vm_fault_copy(
                        if (result_page == VM_PAGE_NULL)
                                vm_page_zero_fill(dst_page);
                        else{
+                               vm_object_lock(result_page->object);
                                vm_page_copy(result_page, dst_page);
+                               vm_object_unlock(result_page->object);
+
                                if (!dst_page->dirty) {
                                        vm_object_lock(dst_object);
-                                       dst_page->dirty = TRUE;
+                                       SET_PAGE_DIRTY(dst_page, TRUE);
                                        vm_object_unlock(dst_page->object);
                                }
                        }
@@ -4095,7 +5481,7 @@ vm_fault_classify(vm_object_t             object,
                                break;
                        }
 
-                       offset += object->shadow_offset;
+                       offset += object->vo_shadow_offset;
                        object = object->shadow;
                        level++;
                        continue;
@@ -4128,8 +5514,91 @@ vm_fault_classify_init(void)
 #endif /* VM_FAULT_CLASSIFY */
 
 
-extern int cs_validation;
+void
+vm_page_validate_cs_mapped(
+       vm_page_t       page,
+       const void      *kaddr)
+{
+       vm_object_t             object;
+       vm_object_offset_t      offset;
+       kern_return_t           kr;
+       memory_object_t         pager;
+       void                    *blobs;
+       boolean_t               validated, tainted;
+
+       assert(page->busy);
+       vm_object_lock_assert_exclusive(page->object);
+
+       if (!cs_validation) {
+               return;
+       }
+
+       if (page->wpmapped && !page->cs_tainted) {
+               /*
+                * This page was mapped for "write" access sometime in the
+                * past and could still be modifiable in the future.
+                * Consider it tainted.
+                * [ If the page was already found to be "tainted", no
+                * need to re-validate. ]
+                */
+               page->cs_validated = TRUE;
+               page->cs_tainted = TRUE;
+               if (cs_debug) {
+                       printf("CODESIGNING: vm_page_validate_cs: "
+                              "page %p obj %p off 0x%llx "
+                              "was modified\n",
+                              page, page->object, page->offset);
+               }
+               vm_cs_validated_dirtied++;
+       }
+
+       if (page->cs_validated) {
+               return;
+       }
 
+       vm_cs_validates++;
+
+       object = page->object;
+       assert(object->code_signed);
+       offset = page->offset;
+
+       if (!object->alive || object->terminating || object->pager == NULL) {
+               /*
+                * The object is terminating and we don't have its pager
+                * so we can't validate the data...
+                */
+               return;
+       }
+       /*
+        * Since we get here to validate a page that was brought in by
+        * the pager, we know that this pager is all set up and ready
+        * by now.
+        */
+       assert(!object->internal);
+       assert(object->pager != NULL);
+       assert(object->pager_ready);
+
+       pager = object->pager;
+       assert(object->paging_in_progress);
+       kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
+       if (kr != KERN_SUCCESS) {
+               blobs = NULL;
+       }
+
+       /* verify the SHA1 hash for this page */
+       validated = cs_validate_page(blobs,
+                                    pager,
+                                    offset + object->paging_offset,
+                                    (const void *)kaddr,
+                                    &tainted);
+
+       page->cs_validated = validated;
+       if (validated) {
+               page->cs_tainted = tainted;
+       }
+}
+
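A hedged sketch of how a caller is expected to use vm_page_validate_cs_mapped(): with the page busy and its object locked exclusively, map the page readable in the kernel, validate it, and unmap only if the mapping layer says a fresh mapping was created. This mirrors the sequence in vm_page_validate_cs() below:

        vm_map_size_t   ksize = PAGE_SIZE_64;
        vm_map_offset_t koffset = 0;
        boolean_t       need_unmap = FALSE;
        kern_return_t   kr;

        /* page must be busy and its object locked exclusively */
        kr = vm_paging_map_object(page, object, offset,
                                  VM_PROT_READ,
                                  FALSE,        /* can't unlock object */
                                  &ksize, &koffset, &need_unmap);
        if (kr == KERN_SUCCESS) {
                vm_page_validate_cs_mapped(page,
                    (const void *) CAST_DOWN(vm_offset_t, koffset));
                if (need_unmap)
                        vm_paging_unmap_object(object, koffset,
                                               koffset + ksize);
        }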
+extern int panic_on_cs_killed;
 void
 vm_page_validate_cs(
        vm_page_t       page)
@@ -4140,10 +5609,8 @@ vm_page_validate_cs(
        vm_map_size_t           ksize;
        vm_offset_t             kaddr;
        kern_return_t           kr;
-       memory_object_t         pager;
-       void                    *blobs;
-       boolean_t               validated, tainted;
        boolean_t               busy_page;
+       boolean_t               need_unmap;
 
        vm_object_lock_assert_held(page->object);
 
@@ -4151,44 +5618,46 @@ vm_page_validate_cs(
                return;
        }
 
-       if (page->cs_validated && !page->cs_tainted && page->wpmapped) {
+       if (page->wpmapped && !page->cs_tainted) {
                vm_object_lock_assert_exclusive(page->object);
 
                /*
-                * This page has already been validated and found to
-                * be valid.  However, it was mapped for "write" access
-                * sometime in the past, so we have to check if it was
-                * modified.  If so, it needs to be revalidated.
-                * If the page was already found to be "tainted", no
-                * need to re-validate.
+                * This page was mapped for "write" access sometime in the
+                * past and could still be modifiable in the future.
+                * Consider it tainted.
+                * [ If the page was already found to be "tainted", no
+                * need to re-validate. ]
                 */
-               if (!page->dirty) {
-                       vm_cs_query_modified++;
-                       page->dirty = pmap_is_modified(page->phys_page);
-               }
-               if (page->dirty) {
-                       /*
-                        * The page is dirty, so let's clear its
-                        * "validated" bit and re-validate it.
-                        */
-                       if (cs_debug) {
-                               printf("CODESIGNING: vm_page_validate_cs: "
-                                      "page %p obj %p off 0x%llx "
-                                      "was modified\n",
-                                      page, page->object, page->offset);
-                       }
-                       page->cs_validated = FALSE;
-                       vm_cs_validated_dirtied++;
+               page->cs_validated = TRUE;
+               page->cs_tainted = TRUE;
+               if (cs_debug) {
+                       printf("CODESIGNING: vm_page_validate_cs: "
+                              "page %p obj %p off 0x%llx "
+                              "was modified\n",
+                              page, page->object, page->offset);
                }
+               vm_cs_validated_dirtied++;
        }
 
        if (page->cs_validated) {
                return;
        }
 
-       vm_object_lock_assert_exclusive(page->object);
+       if (panic_on_cs_killed &&
+           page->slid) {
+               panic("vm_page_validate_cs(%p): page is slid\n", page);
+       }
+       assert(!page->slid);
 
-       vm_cs_validates++;
+#if CHECK_CS_VALIDATION_BITMAP 
+       if (vnode_pager_cs_check_validation_bitmap(page->object->pager, trunc_page(page->offset + page->object->paging_offset), CS_BITMAP_CHECK) == KERN_SUCCESS) {
+               page->cs_validated = TRUE;
+               page->cs_tainted = FALSE;
+               vm_cs_bitmap_validated++;
+               return;
+       }
+#endif
+       vm_object_lock_assert_exclusive(page->object);
 
        object = page->object;
        assert(object->code_signed);
@@ -4208,64 +5677,38 @@ vm_page_validate_cs(
        vm_object_paging_begin(object);
 
        /* map the page in the kernel address space */
-       koffset = 0;
        ksize = PAGE_SIZE_64;
-       kr = vm_paging_map_object(&koffset,
-                                 page,
+       koffset = 0;
+       need_unmap = FALSE;
+       kr = vm_paging_map_object(page,
                                  object,
                                  offset,
+                                 VM_PROT_READ,
+                                 FALSE, /* can't unlock object ! */
                                  &ksize,
-                                 FALSE); /* can't unlock object ! */
+                                 &koffset,
+                                 &need_unmap);
        if (kr != KERN_SUCCESS) {
                panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
        }
        kaddr = CAST_DOWN(vm_offset_t, koffset);
 
-       /*
-        * Since we get here to validate a page that was brought in by
-        * the pager, we know that this pager is all setup and ready
-        * by now.
-        */
-       assert(!object->internal);
-       assert(object->pager != NULL);
-       assert(object->pager_ready);
-
-       if (!object->alive || object->terminating || object->pager == NULL) {
-               /*
-                * The object is terminating and we don't have its pager
-                * so we can't validate the data...
-                */
-               goto out;
-       }
+       /* validate the mapped page */
+       vm_page_validate_cs_mapped(page, (const void *) kaddr);
 
-       pager = object->pager;
-       assert(pager != NULL);
-
-       kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
-       if (kr != KERN_SUCCESS) {
-               blobs = NULL;
+#if CHECK_CS_VALIDATION_BITMAP 
+       if (page->cs_validated == TRUE && page->cs_tainted == FALSE) {
+               vnode_pager_cs_check_validation_bitmap(object->pager, trunc_page(offset + object->paging_offset), CS_BITMAP_SET);
        }
-
-       /* verify the SHA1 hash for this page */
-       validated = cs_validate_page(blobs,
-                                    offset + object->paging_offset,
-                                    (const void *)kaddr,
-                                    &tainted);
-
+#endif
        assert(page->busy);
        assert(object == page->object);
        vm_object_lock_assert_exclusive(object);
 
-       page->cs_validated = validated;
-       if (validated) {
-               page->cs_tainted = tainted;
-       }
-
-out:
        if (!busy_page) {
                PAGE_WAKEUP_DONE(page);
        }
-       if (koffset != 0) {
+       if (need_unmap) {
                /* unmap the map from the kernel address space */
                vm_paging_unmap_object(object, koffset, koffset + ksize);
                koffset = 0;