git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/vm_fault.c (xnu-4570.61.1)
diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c
index 87ffc3ee7c6d8b78671fcc2bfe599303a8e851ce..256c70dfe27d158f351fa7e830850b82c816c72e 100644
--- a/osfmk/vm/vm_fault.c
+++ b/osfmk/vm/vm_fault.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
@@ -64,7 +64,6 @@
 
 #include <mach_cluster_stats.h>
 #include <mach_pagemap.h>
-#include <mach_kdb.h>
 #include <libkern/OSAtomic.h>
 
 #include <mach/mach_types.h>
 #include <kern/macro_help.h>
 #include <kern/zalloc.h>
 #include <kern/misc_protos.h>
+#include <kern/policy_internal.h>
 
-#include <ppc/proc_reg.h>
-
+#include <vm/vm_compressor.h>
+#include <vm/vm_compressor_pager.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_external.h>
 #include <vm/memory_object.h>
 #include <vm/vm_purgeable_internal.h>  /* Needed by some vm_page.h macros */
+#include <vm/vm_shared_region.h>
+
+#include <sys/codesign.h>
+#include <sys/reason.h>
+#include <sys/signalvar.h>
 
-#include <sys/kdebug.h>
+#include <san/kasan.h>
 
 #define VM_FAULT_CLASSIFY      0
 
-/* Zero-filled pages are marked "m->zero_fill" and put on the
- * special zero-fill inactive queue  only if they belong to
- * an object at least this big.
+#define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
+
+unsigned int   vm_object_pagein_throttle = 16;
+
+/*
+ * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control; it
+ * kicks in when swap space runs out.  64-bit programs have massive address spaces and, if buggy, can leak
+ * enormous amounts of memory and run the system completely out of swap space.  If this happens, we
+ * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
+ * keep the UI active so that the user has a chance to kill the offending task before the system
+ * completely hangs.
+ *
+ * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
+ * to tasks that appear to be bloated.  When swap runs out, any task using more than vm_hard_throttle_threshold
+ * will be throttled.  The throttling is done by giving the thread that's trying to demand zero a page a
+ * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
  */
-#define        VM_ZF_OBJECT_SIZE_THRESHOLD     (0x200000)
 
-#define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
+extern void throttle_lowpri_io(int);
+
+extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
+
+uint64_t vm_hard_throttle_threshold;
+
+
+
+#define NEED_TO_HARD_THROTTLE_THIS_TASK()      (vm_wants_task_throttled(current_task()) ||     \
+                                                (vm_page_free_count < vm_page_throttle_limit && \
+                                                 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) > THROTTLE_LEVEL_THROTTLED))
+
 
-int    vm_object_pagein_throttle = 16;
+#define HARD_THROTTLE_DELAY    5000    /* 5000 us == 5 ms */
+#define SOFT_THROTTLE_DELAY    200     /* 200 us == .2 ms */
 
-extern int cs_debug;
+#define        VM_PAGE_CREATION_THROTTLE_PERIOD_SECS   6
+#define        VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC  20000
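The constants above drive the rest of this change: NEED_TO_HARD_THROTTLE_THIS_TASK() selects the 5 ms hard delay, and the PERIOD/RATE pair bounds per-thread page creation. As a reading aid, here is a minimal userspace model of the hard-throttle decision; the kernel globals (vm_page_free_count, vm_page_throttle_limit) and the task/thread policy queries become plain parameters, so this is a sketch rather than kernel code:

#include <stdint.h>
#include <stdbool.h>

#define HARD_THROTTLE_DELAY_US 5000     /* mirrors HARD_THROTTLE_DELAY */

/* Delay (in us) to impose on a demand-zero fault before it is retried. */
static uint32_t
hard_throttle_delay_us(bool     task_wants_throttle,  /* vm_wants_task_throttled() */
                       uint32_t free_pages,           /* vm_page_free_count */
                       uint32_t throttle_limit,       /* vm_page_throttle_limit */
                       bool     io_throttled)         /* IO policy beyond THROTTLE_LEVEL_THROTTLED */
{
	if (task_wants_throttle ||
	    (free_pages < throttle_limit && io_throttled))
		return HARD_THROTTLE_DELAY_US;

	return 0;   /* soft throttling is decided later, in vm_page_throttled() */
}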
 
-#if    MACH_KDB
-extern struct db_watchpoint *db_watchpoint_list;
-#endif /* MACH_KDB */
 
+boolean_t current_thread_aborted(void);
 
 /* Forward declarations of internal routines. */
-extern kern_return_t vm_fault_wire_fast(
+static kern_return_t vm_fault_wire_fast(
                                vm_map_t        map,
                                vm_map_offset_t va,
+                               vm_prot_t       prot,
+                               vm_tag_t        wire_tag,
                                vm_map_entry_t  entry,
                                pmap_t          pmap,
-                               vm_map_offset_t pmap_addr);
-
-extern void vm_fault_continue(void);
-
-extern void vm_fault_copy_cleanup(
+                               vm_map_offset_t pmap_addr,
+                               ppnum_t         *physpage_p);
+
+static kern_return_t vm_fault_internal(
+               vm_map_t        map,
+               vm_map_offset_t vaddr,
+               vm_prot_t       caller_prot,
+               boolean_t       change_wiring,
+               vm_tag_t        wire_tag,
+               int             interruptible,
+               pmap_t          pmap,
+               vm_map_offset_t pmap_addr,
+               ppnum_t         *physpage_p);
+
+static void vm_fault_copy_cleanup(
                                vm_page_t       page,
                                vm_page_t       top_page);
 
-extern void vm_fault_copy_dst_cleanup(
+static void vm_fault_copy_dst_cleanup(
                                vm_page_t       page);
 
 #if    VM_FAULT_CLASSIFY
@@ -149,6 +189,21 @@ extern void vm_fault_classify(vm_object_t  object,
 extern void vm_fault_classify_init(void);
 #endif
 
+unsigned long vm_pmap_enter_blocked = 0;
+unsigned long vm_pmap_enter_retried = 0;
+
+unsigned long vm_cs_validates = 0;
+unsigned long vm_cs_revalidates = 0;
+unsigned long vm_cs_query_modified = 0;
+unsigned long vm_cs_validated_dirtied = 0;
+unsigned long vm_cs_bitmap_validated = 0;
+
+void vm_pre_fault(vm_map_offset_t);
+
+extern char *kdp_compressor_decompressed_page;
+extern addr64_t        kdp_compressor_decompressed_page_paddr;
+extern ppnum_t kdp_compressor_decompressed_page_ppnum;
+
 /*
  *     Routine:        vm_fault_init
  *     Purpose:
@@ -157,6 +212,42 @@ extern void vm_fault_classify_init(void);
 void
 vm_fault_init(void)
 {
+       int i, vm_compressor_temp;
+       boolean_t need_default_val = TRUE;
+       /*
+        * Choose a value for the hard throttle threshold based on the amount of RAM.  The threshold is
+        * computed as a percentage of available memory, and the percentage used is scaled inversely with
+        * the amount of memory.  The percentage runs between 10% and 35%.  We use 35% for small-memory systems
+        * and reduce it to 10% for very large memory configurations.  This gives us a definition of a
+        * memory hog that makes more sense relative to the amount of RAM in the machine.
+        * The formula here simply uses the number of gigabytes of RAM to adjust the percentage.
+        */
+
+       vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100;
+
+       /*
+        * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
+        */
+
+       if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) {
+               for ( i = 0; i < VM_PAGER_MAX_MODES; i++) {
+                       if (vm_compressor_temp > 0 &&
+                           ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) {
+                               need_default_val = FALSE;
+                               vm_compressor_mode = vm_compressor_temp;
+                               break;
+                       }
+               }
+               if (need_default_val)
+                       printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
+       }
+       if (need_default_val) {
+               /* If no boot arg or incorrect boot arg, try device tree. */
+               PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
+       }
+       PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count));
+
+       printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
 }
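To make the scaling above concrete: a 1 GB machine is throttled at 35 - 1 = 34% of RAM, a 4 GB machine at 31%, and anything with 25 GB or more bottoms out at 35 - 25 = 10%. The same formula as a standalone program ("sane_size" stands in for the kernel's usable-memory figure):

#include <stdint.h>
#include <stdio.h>

/* vm_hard_throttle_threshold, per the formula in vm_fault_init() above */
static uint64_t
hard_throttle_threshold(uint64_t sane_size)
{
	uint64_t gb  = sane_size / (1024ULL * 1024 * 1024);
	uint64_t pct = 35 - (gb < 25 ? gb : 25);        /* 35% down to 10% */

	return sane_size * pct / 100;
}

int
main(void)
{
	for (uint64_t gb = 1; gb <= 32; gb *= 2) {
		uint64_t size = gb << 30;

		printf("%2llu GB ram -> threshold %5llu MB\n",
		    (unsigned long long)gb,
		    (unsigned long long)(hard_throttle_threshold(size) >> 20));
	}
	return 0;
}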
 
 /*
@@ -175,14 +266,14 @@ vm_fault_init(void)
  */
 void
 vm_fault_cleanup(
-       register vm_object_t    object,
-       register vm_page_t      top_page)
+       vm_object_t     object,
+       vm_page_t       top_page)
 {
        vm_object_paging_end(object);
-       vm_object_unlock(object);
+       vm_object_unlock(object);
 
        if (top_page != VM_PAGE_NULL) {
-               object = top_page->object;
+               object = VM_PAGE_OBJECT(top_page);
 
                vm_object_lock(object);
                VM_PAGE_FREE(top_page);
@@ -213,11 +304,14 @@ struct {
 
 
 boolean_t      vm_page_deactivate_behind = TRUE;
-/* 
- * default sizes given VM_BEHAVIOR_DEFAULT reference behavior 
+/*
+ * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
  */
-int vm_default_ahead = 0;
-int vm_default_behind = MAX_UPL_TRANSFER;
+#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW    128
+#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER   16              /* don't make this too big... */
+                                                                /* we use it to size an array on the stack */
+
+int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
 
 #define MAX_SEQUENTIAL_RUN     (1024 * 1024 * 1024)
 
@@ -334,6 +428,8 @@ vm_fault_is_sequential(
 }
 
 
+int vm_page_deactivate_behind_count = 0;
+
 /*
  * vm_page_deactivate_behind
  *
@@ -353,10 +449,17 @@ vm_fault_deactivate_behind(
        vm_object_offset_t      offset,
        vm_behavior_t           behavior)
 {
-       vm_page_t       m = NULL;
+       int             n;
+       int             pages_in_run = 0;
+       int             max_pages_in_run = 0;
        int             sequential_run;
        int             sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+       vm_object_offset_t      run_offset = 0;
+       vm_object_offset_t      pg_offset = 0;
+       vm_page_t       m;
+       vm_page_t       page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
 
+       pages_in_run = 0;
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
 #endif
@@ -381,12 +484,16 @@ vm_fault_deactivate_behind(
        case VM_BEHAVIOR_RANDOM:
                break;
        case VM_BEHAVIOR_SEQUENTIAL:
-               if (sequential_run >= (int)PAGE_SIZE)
-                       m = vm_page_lookup(object, offset - PAGE_SIZE_64);
+               if (sequential_run >= (int)PAGE_SIZE) {
+                       run_offset = 0 - PAGE_SIZE_64;
+                       max_pages_in_run = 1;
+               }
                break;
        case VM_BEHAVIOR_RSEQNTL:
-               if (sequential_run >= (int)PAGE_SIZE)
-                       m = vm_page_lookup(object, offset + PAGE_SIZE_64);
+               if (sequential_run >= (int)PAGE_SIZE) {
+                       run_offset = PAGE_SIZE_64;
+                       max_pages_in_run = 1;
+               }
                break;
        case VM_BEHAVIOR_DEFAULT:
        default:
@@ -397,32 +504,160 @@ vm_fault_deactivate_behind(
                 * long enough on an object with default access behavior
                 * to consider it for deactivation
                 */
-               if ((uint64_t)sequential_run >= behind) {
+               if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
+                       /*
+                        * the comparisons between offset and behind are done
+                        * in this kind of odd fashion in order to prevent wrap around
+                        * at the end points
+                        */
                        if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
-                               if (offset >= behind)
-                                       m = vm_page_lookup(object, offset - behind);
+                               if (offset >= behind) {
+                                       run_offset = 0 - behind;
+                                       pg_offset = PAGE_SIZE_64;
+                                       max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
+                               }
                        } else {
-                               if (offset < -behind)
-                                       m = vm_page_lookup(object, offset + behind);
+                               if (offset < -behind) {
+                                       run_offset = behind;
+                                       pg_offset = 0 - PAGE_SIZE_64;
+                                       max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
+                               }
                        }
                }
                break;
        }
        }
-       if (m) {
-               if (!m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
-                       pmap_clear_reference(m->phys_page);
-                       m->deactivated = TRUE;
+        for (n = 0; n < max_pages_in_run; n++) {
+               m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
+
+               if (m && !m->laundry && !m->busy && !m->no_cache && (m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->fictitious && !m->absent) {
+                       page_run[pages_in_run++] = m;
+
+                       /*
+                        * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+                        *
+                        * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+                        * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+                        * new reference happens. If no further references happen on the page after that remote TLB flushes,
+                        * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+                        * by pageout_scan, which is just fine since the last reference would have happened quite far
+                        * in the past (TLB caches don't hang around for very long), and of course could just as easily
+                        * have happened before we did the deactivate_behind.
+                        */
+                       pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+               }
+       }
+       if (pages_in_run) {
+               vm_page_lockspin_queues();
+
+               for (n = 0; n < pages_in_run; n++) {
+
+                       m = page_run[n];
+
+                       vm_page_deactivate_internal(m, FALSE);
+
+                       vm_page_deactivate_behind_count++;
 #if TRACEFAULTPAGE
                        dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
 #endif
-                       return TRUE;
                }
+               vm_page_unlock_queues();
+
+               return TRUE;
        }
        return FALSE;
 }
 
 
+#if (DEVELOPMENT || DEBUG)
+uint32_t       vm_page_creation_throttled_hard = 0;
+uint32_t       vm_page_creation_throttled_soft = 0;
+uint64_t       vm_page_creation_throttle_avoided = 0;
+#endif /* DEVELOPMENT || DEBUG */
+
+static int
+vm_page_throttled(boolean_t page_kept)
+{
+        clock_sec_t     elapsed_sec;
+        clock_sec_t     tv_sec;
+        clock_usec_t    tv_usec;
+
+       thread_t thread = current_thread();
+
+       if (thread->options & TH_OPT_VMPRIV)
+               return (0);
+
+       if (thread->t_page_creation_throttled) {
+               thread->t_page_creation_throttled = 0;
+
+               if (page_kept == FALSE)
+                       goto no_throttle;
+       }
+       if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
+#if (DEVELOPMENT || DEBUG)
+               thread->t_page_creation_throttled_hard++;
+               OSAddAtomic(1, &vm_page_creation_throttled_hard);
+#endif /* DEVELOPMENT || DEBUG */
+               return (HARD_THROTTLE_DELAY);
+       }
+
+       if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
+           thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
+
+               if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
+#if (DEVELOPMENT || DEBUG)
+                       OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
+#endif
+                       goto no_throttle;
+               }
+               clock_get_system_microtime(&tv_sec, &tv_usec);
+
+               elapsed_sec = tv_sec - thread->t_page_creation_time;
+
+               if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
+                   (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
+
+                       if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
+                               /*
+                                * we'll reset our stats to give a well-behaved app
+                                * that was unlucky enough to accumulate a bunch of pages
+                                * over a long period of time a chance to get out of
+                                * the throttled state... we reset the counter and timestamp
+                                * so that if it stays under the rate limit for the next second
+                                * it will be back in our good graces... if it exceeds it, it
+                                * will remain in the throttled state
+                                */
+                               thread->t_page_creation_time = tv_sec;
+                               thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
+                       }
+                       ++vm_page_throttle_count;
+
+                       thread->t_page_creation_throttled = 1;
+
+                       if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
+#if (DEVELOPMENT || DEBUG)
+                               thread->t_page_creation_throttled_hard++;
+                               OSAddAtomic(1, &vm_page_creation_throttled_hard);
+#endif /* DEVELOPMENT || DEBUG */
+                               return (HARD_THROTTLE_DELAY);
+                       } else {
+#if (DEVELOPMENT || DEBUG)
+                               thread->t_page_creation_throttled_soft++;
+                               OSAddAtomic(1, &vm_page_creation_throttled_soft);
+#endif /* DEVELOPMENT || DEBUG */
+                               return (SOFT_THROTTLE_DELAY);
+                       }
+               }
+               thread->t_page_creation_time = tv_sec;
+               thread->t_page_creation_count = 0;
+       }
+no_throttle:
+       thread->t_page_creation_count++;
+
+       return (0);
+}
+
+
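Stripped of its clock plumbing and free-page checks, vm_page_throttled() above is a per-thread rate limiter: a thread gets a burst allowance of one full period (6 s x 20000 pages) and is delayed once its sustained creation rate stays at or above 20000 pages/sec. A minimal model of that arithmetic follows; the struct fields stand in for the t_page_creation_* thread fields, and the 3x-period amnesty reset is noted but not modeled:

#include <stdint.h>
#include <stdbool.h>

#define PERIOD_SECS  6          /* VM_PAGE_CREATION_THROTTLE_PERIOD_SECS */
#define RATE_PER_SEC 20000      /* VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC */

struct thread_model {
	uint64_t page_creation_count;
	uint64_t page_creation_time;    /* seconds */
};

/* TRUE when the thread has exceeded its page-creation budget. */
static bool
over_creation_budget(struct thread_model *t, uint64_t now_sec)
{
	if (t->page_creation_count <= (uint64_t)PERIOD_SECS * RATE_PER_SEC)
		return false;           /* still inside the burst allowance */

	uint64_t elapsed = now_sec - t->page_creation_time;

	/* (the kernel also resets the stats entirely after 3 full periods) */
	if (elapsed <= PERIOD_SECS ||
	    t->page_creation_count / elapsed >= RATE_PER_SEC)
		return true;            /* sustained rate too high: throttle */

	/* under the sustained rate: open a fresh window, as the kernel does */
	t->page_creation_time  = now_sec;
	t->page_creation_count = 0;
	return false;
}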
 /*
  * check for various conditions that would
  * prevent us from creating a ZF page...
@@ -432,12 +667,18 @@ vm_fault_deactivate_behind(
  * object == m->object
  */
 static vm_fault_return_t
-vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state)
+vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state, boolean_t page_throttle)
 {
-        if (object->shadow_severed) {
+       int throttle_delay;
+
+        if (object->shadow_severed ||
+           VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
                /*
-                * the shadow chain was severed
-                * just have to return an error at this point
+                * Either:
+                * 1. the shadow chain was severed,
+                * 2. the purgeable object is volatile or empty and is marked
+                *    to fault on access while volatile.
+                * Just have to return an error at this point
                 */
                if (m != VM_PAGE_NULL)
                        VM_PAGE_FREE(m);
@@ -467,18 +708,28 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t int
                        return (VM_FAULT_RETRY);
                }
        }
-       if (VM_PAGE_ZFILL_THROTTLED()) {
-               /*
-                * we're throttling zero-fills...
-                * treat this as if we couldn't grab a page
-                */
-               if (m != VM_PAGE_NULL)
-                       VM_PAGE_FREE(m);
-               vm_fault_cleanup(object, first_m);
+       if (page_throttle == TRUE) {
+               if ((throttle_delay = vm_page_throttled(FALSE))) {
+                       /*
+                        * we're throttling zero-fills...
+                        * treat this as if we couldn't grab a page
+                        */
+                       if (m != VM_PAGE_NULL)
+                               VM_PAGE_FREE(m);
+                       vm_fault_cleanup(object, first_m);
 
-               thread_interrupt_level(interruptible_state);
+                       VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+
+                       delay(throttle_delay);
+
+                       if (current_thread_aborted()) {
+                               thread_interrupt_level(interruptible_state);
+                               return VM_FAULT_INTERRUPTED;
+                       }
+                       thread_interrupt_level(interruptible_state);
 
-               return (VM_FAULT_MEMORY_SHORTAGE);
+                       return (VM_FAULT_MEMORY_SHORTAGE);
+               }
        }
        return (VM_FAULT_SUCCESS);
 }
@@ -495,6 +746,9 @@ static int
 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
 {
         int my_fault = DBG_ZERO_FILL_FAULT;
+       vm_object_t     object;
+
+       object = VM_PAGE_OBJECT(m);
 
        /*
         * This is is a zero-fill page fault...
@@ -508,41 +762,51 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
         * execution.  i.e. it is the responsibility
         * of higher layers to call for an instruction
         * sync after changing the contents and before
-        * sending a program into this area.  We 
+        * sending a program into this area.  We
         * choose this approach for performance
         */
        m->pmapped = TRUE;
 
        m->cs_validated = FALSE;
        m->cs_tainted = FALSE;
+       m->cs_nx = FALSE;
+
+       if (no_zero_fill == TRUE) {
+               my_fault = DBG_NZF_PAGE_FAULT;
 
-       if (no_zero_fill == TRUE)
-               my_fault = DBG_NZF_PAGE_FAULT;
-       else {
+               if (m->absent && m->busy)
+                       return (my_fault);
+       } else {
                vm_page_zero_fill(m);
 
                VM_STAT_INCR(zero_fill_count);
                DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
        }
        assert(!m->laundry);
-       assert(m->object != kernel_object);
-       //assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+       assert(object != kernel_object);
+       //assert(m->pageq.next == 0 && m->pageq.prev == 0);
+
+       if (!VM_DYNAMIC_PAGING_ENABLED() &&
+               (object->purgable == VM_PURGABLE_DENY ||
+                object->purgable == VM_PURGABLE_NONVOLATILE ||
+                object->purgable == VM_PURGABLE_VOLATILE )) {
 
-       if (!IP_VALID(memory_manager_default) &&
-               (m->object->purgable == VM_PURGABLE_DENY ||
-                m->object->purgable == VM_PURGABLE_NONVOLATILE)) {
-               vm_page_lock_queues();
+               vm_page_lockspin_queues();
 
-                queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
-                m->throttled = TRUE;
-                vm_page_throttled_count++;
+               if (!VM_DYNAMIC_PAGING_ENABLED()) {
+                       assert(!VM_PAGE_WIRED(m));
 
-               vm_page_unlock_queues();
-       } else {
-               if (m->object->size > VM_ZF_OBJECT_SIZE_THRESHOLD) {
-                       m->zero_fill = TRUE;
-                       OSAddAtomic(1, (SInt32 *)&vm_zf_count);
+                       /*
+                        * can't be on the pageout queue since we don't
+                        * have a pager to try and clean to
+                        */
+                       vm_page_queues_remove(m, TRUE);
+                       vm_page_check_pageable_safe(m);
+                       vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+                       m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
+                       vm_page_throttled_count++;
                }
+               vm_page_unlock_queues();
        }
        return (my_fault);
 }
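The queueing decision at the tail of vm_fault_zero_page() reduces to a predicate: with no dynamic pager configured, a freshly zero-filled page belonging to a purgeable deny/nonvolatile/volatile object has no pager to be cleaned to, so it is parked on the throttled queue rather than a pageable one. That predicate, sketched with stand-in types:

#include <stdbool.h>

/* stand-ins for the VM_PURGABLE_* object states tested above */
enum purgable {
	PURGABLE_DENY, PURGABLE_NONVOLATILE, PURGABLE_VOLATILE, PURGABLE_EMPTY
};

static bool
zero_fill_page_goes_on_throttled_q(bool dynamic_paging_enabled,
                                   enum purgable obj_state)
{
	return !dynamic_paging_enabled &&
	       (obj_state == PURGABLE_DENY ||
	        obj_state == PURGABLE_NONVOLATILE ||
	        obj_state == PURGABLE_VOLATILE);
}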
@@ -558,7 +822,7 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
  *             The required permissions for the page is given
  *             in "fault_type".  Desired permissions are included
  *             in "protection".
- *             fault_info is passed along to determine pagein cluster 
+ *             fault_info is passed along to determine pagein cluster
  *             limits... it contains the expected reference pattern,
  *             cluster size if available, etc...
  *
@@ -589,7 +853,15 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
  *             be destroyed when this guarantee is no longer required.
  *             The "result_page" is also left busy.  It is not removed
  *             from the pageout queues.
+ *     Special Case:
+ *             A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
+ *             fault succeeded but there's no VM page (i.e. the VM object
+ *             does not actually hold VM pages, but device memory or
+ *             large pages).  The object is still locked and we still hold a
+ *             paging_in_progress reference.
  */
+unsigned int vm_fault_page_blocked_access = 0;
+unsigned int vm_fault_page_forced_retry = 0;
 
 vm_fault_return_t
 vm_fault_page(
@@ -598,10 +870,11 @@ vm_fault_page(
        vm_object_offset_t first_offset,        /* Offset into object */
        vm_prot_t       fault_type,     /* What access is requested */
        boolean_t       must_be_resident,/* Must page be resident? */
+       boolean_t       caller_lookup,  /* caller looked up page */
        /* Modifies in place: */
        vm_prot_t       *protection,    /* Protection for mapping */
-       /* Returns: */
        vm_page_t       *result_page,   /* Page found, if successful */
+       /* Returns: */
        vm_page_t       *top_page,      /* Page in top object, if
                                         * not result_page.  */
        int             *type_of_fault, /* if non-null, fill in with type of fault
@@ -609,13 +882,9 @@ vm_fault_page(
        /* More arguments: */
        kern_return_t   *error_code,    /* code if page is in error */
        boolean_t       no_zero_fill,   /* don't zero fill absent pages */
-#if MACH_PAGEMAP
-       boolean_t       data_supply,    /* treat as data_supply if 
+       boolean_t       data_supply,    /* treat as data_supply if
                                         * it is a write fault and a full
                                         * page is provided */
-#else
-       __unused boolean_t data_supply,
-#endif
        vm_object_fault_info_t fault_info)
 {
        vm_page_t               m;
@@ -625,102 +894,68 @@ vm_fault_page(
        vm_object_t             next_object;
        vm_object_t             copy_object;
        boolean_t               look_for_page;
+       boolean_t               force_fault_retry = FALSE;
        vm_prot_t               access_required = fault_type;
        vm_prot_t               wants_copy_flag;
        CLUSTER_STAT(int pages_at_higher_offsets;)
        CLUSTER_STAT(int pages_at_lower_offsets;)
        kern_return_t           wait_result;
        boolean_t               interruptible_state;
+       boolean_t               data_already_requested = FALSE;
+       vm_behavior_t           orig_behavior;
+       vm_size_t               orig_cluster_size;
        vm_fault_return_t       error;
        int                     my_fault;
        uint32_t                try_failed_count;
        int                     interruptible; /* how may fault be interrupted? */
+       int                     external_state = VM_EXTERNAL_STATE_UNKNOWN;
        memory_object_t         pager;
+       vm_fault_return_t       retval;
+       int                     grab_options;
 
 /*
- * MACH page map - an optional optimization where a bit map is maintained
- * by the VM subsystem for internal objects to indicate which pages of
- * the object currently reside on backing store.  This existence map
- * duplicates information maintained by the vnode pager.  It is 
- * created at the time of the first pageout against the object, i.e. 
- * at the same time pager for the object is created.  The optimization
- * is designed to eliminate pager interaction overhead, if it is 
- * 'known' that the page does not exist on backing store.
- *
- * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is 
- * either marked as paged out in the existence map for the object or no 
- * existence map exists for the object.  MUST_ASK_PAGER() is one of the
- * criteria in the decision to invoke the pager.   It is also used as one
- * of the criteria to terminate the scan for adjacent pages in a clustered
- * pagein operation.  Note that MUST_ASK_PAGER() always evaluates to TRUE for
- * permanent objects.  Note also that if the pager for an internal object 
- * has not been created, the pager is not invoked regardless of the value 
- * of MUST_ASK_PAGER() and that clustered pagein scans are only done on an object
- * for which a pager has been created.
+ * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
+ * marked as paged out in the compressor pager or the pager doesn't exist.
+ * Note also that if the pager for an internal object
+ * has not been created, the pager is not invoked regardless of the value
+ * of MUST_ASK_PAGER().
  *
  * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
- * is marked as paged out in the existence map for the object.  PAGED_OUT()
+ * is marked as paged out in the compressor pager.
  * PAGED_OUT() is used to determine if a page has already been pushed
  * into a copy object in order to avoid a redundant page out operation.
  */
-#if MACH_PAGEMAP
-#define MUST_ASK_PAGER(o, f) (vm_external_state_get((o)->existence_map, (f)) \
-                       != VM_EXTERNAL_STATE_ABSENT)
-#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
-                       == VM_EXTERNAL_STATE_EXISTS)
-#else
-#define MUST_ASK_PAGER(o, f) (TRUE)
-#define PAGED_OUT(o, f) (FALSE)
-#endif
+#define MUST_ASK_PAGER(o, f, s)                                        \
+       ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
+
+#define PAGED_OUT(o, f) \
+       (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
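The rewritten macros replace the old MACH_PAGEMAP existence map with the compressor pager's three-state answer. A simplified rendering of the same logic; the real VM_COMPRESSOR_PAGER_STATE_GET() takes an object/offset pair, and the enum here mirrors the VM_EXTERNAL_STATE_* values:

#include <stdbool.h>

enum external_state {
	EXT_STATE_UNKNOWN,      /* VM_EXTERNAL_STATE_UNKNOWN */
	EXT_STATE_EXISTS,       /* compressor holds a copy of this page */
	EXT_STATE_ABSENT        /* compressor definitely does not */
};

/* Ask the pager unless the compressor says the page is definitely absent. */
static bool
must_ask_pager(enum external_state s)
{
	return s != EXT_STATE_ABSENT;
}

/* Page already pushed to backing store, so skip a redundant pageout. */
static bool
paged_out(enum external_state s)
{
	return s == EXT_STATE_EXISTS;
}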
 
 /*
  *     Recovery actions
  */
-#define PREPARE_RELEASE_PAGE(m)                                \
-       MACRO_BEGIN                                     \
-       vm_page_lock_queues();                          \
-       MACRO_END
-
-#define DO_RELEASE_PAGE(m)                             \
-       MACRO_BEGIN                                     \
-       PAGE_WAKEUP_DONE(m);                            \
-       if (!m->active && !m->inactive && !m->throttled)\
-               vm_page_activate(m);                    \
-       vm_page_unlock_queues();                        \
-       MACRO_END
-
 #define RELEASE_PAGE(m)                                        \
        MACRO_BEGIN                                     \
-       PREPARE_RELEASE_PAGE(m);                        \
-       DO_RELEASE_PAGE(m);                             \
+       PAGE_WAKEUP_DONE(m);                            \
+       if ( !VM_PAGE_PAGEABLE(m)) {                    \
+               vm_page_lockspin_queues();              \
+               if ( !VM_PAGE_PAGEABLE(m)) {            \
+                       if (VM_CONFIG_COMPRESSOR_IS_ACTIVE)     \
+                                vm_page_deactivate(m);         \
+                        else                                   \
+                               vm_page_activate(m);            \
+               }                                               \
+               vm_page_unlock_queues();                        \
+       }                                                       \
        MACRO_END
 
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
 #endif
 
-
-#if    MACH_KDB
-               /*
-                *      If there are watchpoints set, then
-                *      we don't want to give away write permission
-                *      on a read fault.  Make the task write fault,
-                *      so that the watchpoint code notices the access.
-                */
-           if (db_watchpoint_list) {
-               /*
-                *      If we aren't asking for write permission,
-                *      then don't give it away.  We're using write
-                *      faults to set the dirty bit.
-                */
-               if (!(fault_type & VM_PROT_WRITE))
-                       *protection &= ~VM_PROT_WRITE;
-       }
-#endif /* MACH_KDB */
-
        interruptible = fault_info->interruptible;
        interruptible_state = thread_interrupt_level(interruptible);
+
        /*
         *      INVARIANTS (through entire routine):
         *
@@ -755,7 +990,7 @@ vm_fault_page(
 
        XPR(XPR_VM_FAULT,
                "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
-               (integer_t)object, offset, fault_type, *protection, 0);
+               object, offset, fault_type, *protection, 0);
 
        /*
         * default type of fault
@@ -766,6 +1001,14 @@ vm_fault_page(
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
 #endif
+
+               grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+               if (object->can_grab_secluded) {
+                       grab_options |= VM_PAGE_GRAB_SECLUDED;
+               }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
                if (!object->alive) {
                        /*
                         * object is no longer valid
@@ -777,10 +1020,53 @@ vm_fault_page(
                        return (VM_FAULT_MEMORY_ERROR);
                }
 
+               if (!object->pager_created && object->phys_contiguous) {
+                       /*
+                        * A physically-contiguous object without a pager:
+                        * must be a "large page" object.  We do not deal
+                        * with VM pages for this object.
+                        */
+                       caller_lookup = FALSE;
+                       m = VM_PAGE_NULL;
+                       goto phys_contig_object;
+               }
+
+               if (object->blocked_access) {
+                       /*
+                        * Access to this VM object has been blocked.
+                        * Replace our "paging_in_progress" reference with
+                        * a "activity_in_progress" reference and wait for
+                        * access to be unblocked.
+                        */
+                       caller_lookup = FALSE; /* no longer valid after sleep */
+                       vm_object_activity_begin(object);
+                       vm_object_paging_end(object);
+                       while (object->blocked_access) {
+                               vm_object_sleep(object,
+                                               VM_OBJECT_EVENT_UNBLOCKED,
+                                               THREAD_UNINT);
+                       }
+                       vm_fault_page_blocked_access++;
+                       vm_object_paging_begin(object);
+                       vm_object_activity_end(object);
+               }
+
                /*
                 * See whether the page at 'offset' is resident
                 */
-               m = vm_page_lookup(object, offset);
+               if (caller_lookup == TRUE) {
+                       /*
+                        * The caller has already looked up the page
+                        * and gave us the result in "result_page".
+                        * We can use this for the first lookup but
+                        * it loses its validity as soon as we unlock
+                        * the object.
+                        */
+                       m = *result_page;
+                       caller_lookup = FALSE; /* no longer valid after that */
+               } else {
+                       m = vm_page_lookup(object, offset);
+               }
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
 #endif
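The blocked_access wait above trades the caller's "paging_in_progress" reference for an "activity_in_progress" reference while sleeping, so a thread that is merely waiting does not stall the pager, then trades back once access is unblocked. A pthread-based analogue of that hand-off, where the struct models only the fields this dance touches and the caller is assumed to hold the lock and one paging reference, as vm_fault_page() does:

#include <pthread.h>
#include <stdbool.h>

struct object_model {
	pthread_mutex_t lock;
	pthread_cond_t  unblocked;      /* VM_OBJECT_EVENT_UNBLOCKED */
	bool            blocked_access;
	int             paging_in_progress;
	int             activity_in_progress;
};

/* Wait out a blocked_access window; called with o->lock held. */
static void
wait_until_unblocked(struct object_model *o)
{
	o->activity_in_progress++;      /* vm_object_activity_begin() */
	o->paging_in_progress--;        /* vm_object_paging_end() */

	while (o->blocked_access)
		pthread_cond_wait(&o->unblocked, &o->lock);

	o->paging_in_progress++;        /* vm_object_paging_begin() */
	o->activity_in_progress--;      /* vm_object_activity_end() */
}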
@@ -790,20 +1076,16 @@ vm_fault_page(
                                /*
                                 * The page is being brought in,
                                 * wait for it and then retry.
-                                *
-                                * A possible optimization: if the page
-                                * is known to be resident, we can ignore
-                                * pages that are absent (regardless of
-                                * whether they're busy).
                                 */
 #if TRACEFAULTPAGE
                                dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
 #endif
                                wait_result = PAGE_SLEEP(object, m, interruptible);
+
                                XPR(XPR_VM_FAULT,
                                    "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
-                                       (integer_t)object, offset,
-                                       (integer_t)m, 0, 0);
+                                   object, offset,
+                                   m, 0, 0);
                                counter(c_vm_fault_page_block_busy_kernel++);
 
                                if (wait_result != THREAD_AWAKENED) {
@@ -811,14 +1093,19 @@ vm_fault_page(
                                        thread_interrupt_level(interruptible_state);
 
                                        if (wait_result == THREAD_RESTART)
-                                               return (VM_FAULT_RETRY);
+                                               return (VM_FAULT_RETRY);
                                        else
                                                return (VM_FAULT_INTERRUPTED);
                                }
                                continue;
                        }
+                       if (m->laundry) {
+                               m->free_when_done = FALSE;
 
-                       if (m->phys_page == vm_page_guard_addr) {
+                               if (!m->cleaning)
+                                       vm_pageout_steal_laundry(m, FALSE);
+                       }
+                       if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
                                /*
                                 * Guard page: off limits !
                                 */
@@ -835,6 +1122,7 @@ vm_fault_page(
                                        *top_page = first_m;
                                        if (type_of_fault)
                                                *type_of_fault = DBG_GUARD_FAULT;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_SUCCESS;
                                } else {
                                        /*
@@ -906,20 +1194,20 @@ vm_fault_page(
                                        /*
                                         * check for any conditions that prevent
                                         * us from creating a new zero-fill page
-                                        * vm_fault_check will do all of the 
+                                        * vm_fault_check will do all of the
                                         * fault cleanup in the case of an error condition
                                         * including resetting the thread_interrupt_level
                                         */
-                                       error = vm_fault_check(object, m, first_m, interruptible_state);
+                                       error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
 
                                        if (error != VM_FAULT_SUCCESS)
                                                return (error);
 
                                        XPR(XPR_VM_FAULT,
                                            "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
-                                               (integer_t)object, offset,
-                                               (integer_t)m,
-                                               (integer_t)first_object, 0);
+                                               object, offset,
+                                               m,
+                                               first_object, 0);
 
                                        if (object != first_object) {
                                                /*
@@ -934,7 +1222,7 @@ vm_fault_page(
                                                vm_object_unlock(object);
 
                                                /*
-                                                * grab the original page we 
+                                                * grab the original page we
                                                 * 'soldered' in place and
                                                 * retake lock on 'first_object'
                                                 */
@@ -953,6 +1241,8 @@ vm_fault_page(
                                                m->absent = FALSE;
                                                m->busy = TRUE;
                                        }
+                                       if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
+                                               m->absent = TRUE;
                                        /*
                                         * zero-fill the page and put it on
                                         * the correct paging queue
@@ -972,25 +1262,25 @@ vm_fault_page(
                                                m->busy = TRUE;
 
                                                vm_page_lockspin_queues();
-                                               VM_PAGE_QUEUES_REMOVE(m);
+                                               vm_page_queues_remove(m, FALSE);
                                                vm_page_unlock_queues();
                                        }
                                        XPR(XPR_VM_FAULT,
                                            "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
-                                               (integer_t)object, offset,
-                                               (integer_t)next_object,
-                                               offset+object->shadow_offset,0);
+                                               object, offset,
+                                               next_object,
+                                               offset+object->vo_shadow_offset,0);
 
-                                       offset += object->shadow_offset;
-                                       fault_info->lo_offset += object->shadow_offset;
-                                       fault_info->hi_offset += object->shadow_offset;
+                                       offset += object->vo_shadow_offset;
+                                       fault_info->lo_offset += object->vo_shadow_offset;
+                                       fault_info->hi_offset += object->vo_shadow_offset;
                                        access_required = VM_PROT_READ;
 
                                        vm_object_lock(next_object);
                                        vm_object_unlock(object);
                                        object = next_object;
                                        vm_object_paging_begin(object);
-                                       
+
                                        /*
                                         * reset to default type of fault
                                         */
@@ -1017,15 +1307,15 @@ vm_fault_page(
 #endif
                                XPR(XPR_VM_FAULT,
                                    "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
-                                       (integer_t)object, offset,
-                                       (integer_t)m, 0, 0);
+                                       object, offset,
+                                       m, 0, 0);
                                /*
                                 * take an extra ref so that object won't die
                                 */
                                vm_object_reference_locked(object);
 
                                vm_fault_cleanup(object, first_m);
-                               
+
                                counter(c_vm_fault_page_block_backoff_kernel++);
                                vm_object_lock(object);
                                assert(object->ref_count > 0);
@@ -1049,7 +1339,8 @@ vm_fault_page(
                                        return (VM_FAULT_RETRY);
                                }
                        }
-                       if (type_of_fault == NULL && m->speculative) {
+                       if (type_of_fault == NULL && (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
+                           !(fault_info != NULL && fault_info->stealth)) {
                                /*
                                 * If we were passed a non-NULL pointer for
                                 * "type_of_fault", than we came from
@@ -1060,37 +1351,19 @@ vm_fault_page(
                                 * take it off the speculative queue, we'll
                                 * let the caller of vm_fault_page deal
                                 * with getting it onto the correct queue
+                                *
+                                * If the caller specified in fault_info that
+                                * it wants a "stealth" fault, we also leave
+                                * the page in the speculative queue.
                                 */
                                vm_page_lockspin_queues();
-                               VM_PAGE_QUEUES_REMOVE(m);
+                               if (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+                                       vm_page_queues_remove(m, FALSE);
                                vm_page_unlock_queues();
                        }
+                       assert(object == VM_PAGE_OBJECT(m));
 
-                       if (m->encrypted) {
-                               /*
-                                * ENCRYPTED SWAP:
-                                * the user needs access to a page that we
-                                * encrypted before paging it out.
-                                * Decrypt the page now.
-                                * Keep it busy to prevent anyone from
-                                * accessing it during the decryption.
-                                */
-                               m->busy = TRUE;
-                               vm_page_decrypt(m, 0);
-                               assert(object == m->object);
-                               assert(m->busy);
-                               PAGE_WAKEUP_DONE(m);
-
-                               /*
-                                * Retry from the top, in case
-                                * something changed while we were
-                                * decrypting.
-                                */
-                               continue;
-                       }
-                       ASSERT_PAGE_DECRYPTED(m);
-
-                       if (m->object->code_signed) {
+                       if (object->code_signed) {
                                /*
                                 * CODE SIGNING:
                                 * We just paged in a page from a signed
@@ -1114,14 +1387,14 @@ vm_fault_page(
 #endif
                        XPR(XPR_VM_FAULT,
                            "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
-                               (integer_t)object, offset, (integer_t)m, 0, 0);
+                               object, offset, m, 0, 0);
                        assert(!m->busy);
                        assert(!m->absent);
 
                        m->busy = TRUE;
                        break;
                }
-               
+
 
                /*
                 * we get here when there is no page present in the object at
@@ -1130,16 +1403,41 @@ vm_fault_page(
                 * this object can provide the data or we're the top object...
                 * object is locked;  m == NULL
                 */
-               look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset) == TRUE) && !data_supply);
-               
+
+               if (must_be_resident) {
+                       if (fault_type == VM_PROT_NONE &&
+                           object == kernel_object) {
+                               /*
+                                * We've been called from vm_fault_unwire()
+                                * while removing a map entry that was allocated
+                                * with KMA_KOBJECT and KMA_VAONLY.  This page
+                                * is not present and there's nothing more to
+                                * do here (nothing to unwire).
+                                */
+                               vm_fault_cleanup(object, first_m);
+                               thread_interrupt_level(interruptible_state);
+
+                               return VM_FAULT_MEMORY_ERROR;
+                       }
+
+                       goto dont_look_for_page;
+               }
+
+               /* Don't expect to fault pages into the kernel object. */
+               assert(object != kernel_object);
+
+               data_supply = FALSE;
+
+               look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply);
+
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);      /* (TEST/DEBUG) */
 #endif
-               if ((look_for_page || (object == first_object)) && !must_be_resident && !object->phys_contiguous) {
+               if (!look_for_page && object == first_object && !object->phys_contiguous) {
                        /*
-                        * Allocate a new page for this object/offset pair
+                        * Allocate a new page for this object/offset pair as a placeholder
                         */
-                       m = vm_page_grab();
+                       m = vm_page_grab_options(grab_options);
 #if TRACEFAULTPAGE
                        dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
 #endif
@@ -1150,10 +1448,16 @@ vm_fault_page(
 
                                return (VM_FAULT_MEMORY_SHORTAGE);
                        }
-                       vm_page_insert(m, object, offset);
+
+                       if (fault_info && fault_info->batch_pmap_op == TRUE) {
+                               vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
+                       } else {
+                               vm_page_insert(m, object, offset);
+                       }
                }
-               if (look_for_page && !must_be_resident) {
+               if (look_for_page) {
                        kern_return_t   rc;
+                       int             my_fault_type;
 
                        /*
                         *      If the memory manager is not ready, we
@@ -1168,7 +1472,7 @@ vm_fault_page(
 
                                XPR(XPR_VM_FAULT,
                                "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
-                                       (integer_t)object, offset, 0, 0, 0);
+                                       object, offset, 0, 0, 0);
 
                                /*
                                 * take an extra ref so object won't die
@@ -1220,8 +1524,8 @@ vm_fault_page(
                                vm_object_lock(object);
                                assert(object->ref_count > 0);
 
-                               if (object->paging_in_progress > vm_object_pagein_throttle) {
-                                       vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS, interruptible);
+                               if (object->paging_in_progress >= vm_object_pagein_throttle) {
+                                       vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);
 
                                        vm_object_unlock(object);
                                        wait_result = thread_block(THREAD_CONTINUE_NULL);
@@ -1236,61 +1540,230 @@ vm_fault_page(
                                        return (VM_FAULT_RETRY);
                                }
                        }
-                       if (m != VM_PAGE_NULL) {
-                               /*
-                                * Indicate that the page is waiting for data
-                                * from the memory manager.
-                                */
-                               m->list_req_pending = TRUE;
-                               m->absent = TRUE;
-                       }
+                       if (object->internal) {
+                               int compressed_count_delta;
+
+                               assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
 
+                               if (m == VM_PAGE_NULL) {
+                                       /*
+                                        * Allocate a new page for this object/offset pair as a placeholder
+                                        */
+                                       m = vm_page_grab_options(grab_options);
 #if TRACEFAULTPAGE
-                       dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);  /* (TEST/DEBUG) */
+                                       dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
 #endif
+                                       if (m == VM_PAGE_NULL) {
 
-                       /*
-                        * It's possible someone called vm_object_destroy while we weren't
-                        * holding the object lock.  If that has happened, then bail out 
-                        * here.
-                        */
+                                               vm_fault_cleanup(object, first_m);
+                                               thread_interrupt_level(interruptible_state);
 
-                       pager = object->pager;
+                                               return (VM_FAULT_MEMORY_SHORTAGE);
+                                       }
 
-                       if (pager == MEMORY_OBJECT_NULL) {
-                               vm_fault_cleanup(object, first_m);
-                               thread_interrupt_level(interruptible_state);
-                               return VM_FAULT_MEMORY_ERROR;
-                       }
+                                       m->absent = TRUE;
+                                       if (fault_info && fault_info->batch_pmap_op == TRUE) {
+                                               vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
+                                       } else {
+                                               vm_page_insert(m, object, offset);
+                                       }
+                               }
+                               assert(m->busy);
 
-                       /*
-                        * We have an absent page in place for the faulting offset,
-                        * so we can release the object lock.
-                        */
+                               m->absent = TRUE;
+                               pager = object->pager;
 
-                       vm_object_unlock(object);
+                               assert(object->paging_in_progress > 0);
+                               vm_object_unlock(object);
 
-                       /*
-                        * If this object uses a copy_call strategy,
-                        * and we are interested in a copy of this object
-                        * (having gotten here only by following a
-                        * shadow chain), then tell the memory manager
-                        * via a flag added to the desired_access
-                        * parameter, so that it can detect a race
-                        * between our walking down the shadow chain
-                        * and its pushing pages up into a copy of
-                        * the object that it manages.
-                        */
-                       if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object)
-                               wants_copy_flag = VM_PROT_WANTS_COPY;
-                       else
+                               rc = vm_compressor_pager_get(
+                                       pager,
+                                       offset + object->paging_offset,
+                                       VM_PAGE_GET_PHYS_PAGE(m),
+                                       &my_fault_type,
+                                       0,
+                                       &compressed_count_delta);
+
+                               if (type_of_fault == NULL) {
+                                       int     throttle_delay;
+
+                                       /*
+                                        * we weren't called from vm_fault, so we
+                                        * need to apply page creation throttling;
+                                        * do it before we re-acquire any locks
+                                        */
+                                       if (my_fault_type == DBG_COMPRESSOR_FAULT) {
+                                               if ((throttle_delay = vm_page_throttled(TRUE))) {
+                                                       VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
+                                                       delay(throttle_delay);
+                                               }
+                                       }
+                               }
+                               vm_object_lock(object);
+                               assert(object->paging_in_progress > 0);
+
+                               vm_compressor_pager_count(
+                                       pager,
+                                       compressed_count_delta,
+                                       FALSE, /* shared_lock */
+                                       object);
+
+                               switch (rc) {
+                               case KERN_SUCCESS:
+                                       m->absent = FALSE;
+                                       m->dirty = TRUE;
+                                       if ((object->wimg_bits &
+                                            VM_WIMG_MASK) !=
+                                           VM_WIMG_USE_DEFAULT) {
+                                               /*
+                                                * If the page is not cacheable,
+                                                * we can't let its contents
+                                                * linger in the data cache
+                                                * after the decompression.
+                                                */
+                                               pmap_sync_page_attributes_phys(
+                                                       VM_PAGE_GET_PHYS_PAGE(m));
+                                       } else {
+                                               m->written_by_kernel = TRUE;
+                                       }
+
+                                       /*
+                                        * If the object is purgeable, its
+                                        * owner's purgeable ledgers have been
+                                        * updated in vm_page_insert() but the
+                                        * page was also accounted for in a
+                                        * "compressed purgeable" ledger, so
+                                        * update that now.
+                                        */
+                                       if ((object->purgable !=
+                                            VM_PURGABLE_DENY) &&
+                                           (object->vo_purgeable_owner !=
+                                            NULL)) {
+                                               /*
+                                                * One less compressed
+                                                * purgeable page.
+                                                */
+                                               vm_purgeable_compressed_update(
+                                                       object,
+                                                       -1);
+                                       }
+
+                                       break;
+                               case KERN_MEMORY_FAILURE:
+                                       m->unusual = TRUE;
+                                       m->error = TRUE;
+                                       m->absent = FALSE;
+                                       break;
+                               case KERN_MEMORY_ERROR:
+                                       assert(m->absent);
+                                       break;
+                               default:
+                                       panic("vm_fault_page(): unexpected "
+                                             "error %d from "
+                                             "vm_compressor_pager_get()\n",
+                                             rc);
+                               }
+                               PAGE_WAKEUP_DONE(m);
+
+                               rc = KERN_SUCCESS;
+                               goto data_requested;
+                       }
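
The three-way result handling above is easier to see in isolation. Below is a minimal user-space sketch, assuming stand-in types (struct page, enum kr) and a hypothetical apply_compressor_result() helper; it models the state transitions, not the real kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the page state bits used above. */
struct page {
        bool absent, dirty, error, unusual;
};

enum kr { KR_SUCCESS, KR_MEMORY_FAILURE, KR_MEMORY_ERROR };

/* Mirror the switch on vm_compressor_pager_get()'s result: success
 * turns the placeholder into a resident dirty page, a decompression
 * failure marks it as an error page, and "no such page" leaves the
 * placeholder absent. */
static void apply_compressor_result(struct page *m, enum kr rc)
{
        switch (rc) {
        case KR_SUCCESS:
                m->absent = false;
                m->dirty  = true;   /* decompressed data only lives here */
                break;
        case KR_MEMORY_FAILURE:
                m->unusual = true;
                m->error   = true;
                m->absent  = false;
                break;
        case KR_MEMORY_ERROR:
                /* page was never compressed: keep m->absent == true */
                break;
        }
}

int main(void)
{
        struct page m = { .absent = true };

        apply_compressor_result(&m, KR_SUCCESS);
        printf("absent=%d dirty=%d\n", m.absent, m.dirty);
        return 0;
}
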
+                       my_fault_type = DBG_PAGEIN_FAULT;
+
+                       if (m != VM_PAGE_NULL) {
+                               VM_PAGE_FREE(m);
+                               m = VM_PAGE_NULL;
+                       }
+
+#if TRACEFAULTPAGE
+                       dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);  /* (TEST/DEBUG) */
+#endif
+
+                       /*
+                        * It's possible someone called vm_object_destroy while we weren't
+                        * holding the object lock.  If that has happened, then bail out
+                        * here.
+                        */
+
+                       pager = object->pager;
+
+                       if (pager == MEMORY_OBJECT_NULL) {
+                               vm_fault_cleanup(object, first_m);
+                               thread_interrupt_level(interruptible_state);
+                               return VM_FAULT_MEMORY_ERROR;
+                       }
+
+                       /*
+                        * We have an absent page in place for the faulting offset,
+                        * so we can release the object lock.
+                        */
+
+                       if (object->object_slid == TRUE) {
+                               set_thread_rwlock_boost();
+                       }
+
+                       vm_object_unlock(object);
+
+                       /*
+                        * If this object uses a copy_call strategy,
+                        * and we are interested in a copy of this object
+                        * (having gotten here only by following a
+                        * shadow chain), then tell the memory manager
+                        * via a flag added to the desired_access
+                        * parameter, so that it can detect a race
+                        * between our walking down the shadow chain
+                        * and its pushing pages up into a copy of
+                        * the object that it manages.
+                        */
+                       if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object)
+                               wants_copy_flag = VM_PROT_WANTS_COPY;
+                       else
                                wants_copy_flag = VM_PROT_NONE;
 
                        XPR(XPR_VM_FAULT,
                            "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
-                               (integer_t)object, offset, (integer_t)m,
+                               object, offset, m,
                                access_required | wants_copy_flag, 0);
 
+                       if (object->copy == first_object) {
+                               /*
+                                * if we issue the memory_object_data_request in
+                                * this state, we are subject to a deadlock with
+                                * the underlying filesystem if it is trying to
+                                * shrink the file resulting in a push of pages
+                                * into the copy object...  that push will stall
+                                * on the placeholder page, and if the pushing thread
+                                * is holding a lock that is required on the pagein
+                                * path (such as a truncate lock), we'll deadlock...
+                                * to avoid this potential deadlock, we throw away
+                                * our placeholder page before calling memory_object_data_request
+                                * and force this thread to retry the vm_fault_page after
+                                * we have issued the I/O.  the second time through this path
+                                * we will find the page already in the cache (presumably still
+                                * busy waiting for the I/O to complete) and then complete
+                                * the fault w/o having to go through memory_object_data_request again
+                                */
+                               assert(first_m != VM_PAGE_NULL);
+                               assert(VM_PAGE_OBJECT(first_m) == first_object);
+
+                               vm_object_lock(first_object);
+                               VM_PAGE_FREE(first_m);
+                               vm_object_paging_end(first_object);
+                               vm_object_unlock(first_object);
+
+                               first_m = VM_PAGE_NULL;
+                               force_fault_retry = TRUE;
+
+                               vm_fault_page_forced_retry++;
+                       }
+
+                       if (data_already_requested == TRUE) {
+                               orig_behavior = fault_info->behavior;
+                               orig_cluster_size = fault_info->cluster_size;
+
+                               fault_info->behavior = VM_BEHAVIOR_RANDOM;
+                               fault_info->cluster_size = PAGE_SIZE;
+                       }
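
Taken together, the placeholder toss above and this clamp form a two-pass protocol: pass one issues the I/O and forces a retry, pass two shrinks the request to a single page so no fresh readahead piles onto I/O already in flight. A compact user-space sketch, with fault_once() and struct fault_info as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum behavior { B_DEFAULT, B_RANDOM };

struct fault_info {
        enum behavior behavior;
        unsigned      cluster_size;
};

/* One pass of the protocol: on the retry pass, temporarily clamp the
 * request to one page (VM_BEHAVIOR_RANDOM / PAGE_SIZE in the kernel)
 * around the data request, then restore the caller's settings. */
static bool fault_once(struct fault_info *fi, bool *data_already_requested)
{
        bool clamp = *data_already_requested;
        enum behavior orig = fi->behavior;
        unsigned orig_cluster = fi->cluster_size;

        if (clamp) {
                /* retry pass: no fresh readahead for I/O in flight */
                fi->behavior = B_RANDOM;
                fi->cluster_size = 4096;        /* one page */
        }
        /* ... memory_object_data_request() would be issued here ... */
        if (clamp) {
                fi->behavior = orig;
                fi->cluster_size = orig_cluster;
                return true;    /* page now (or soon) in the cache */
        }
        *data_already_requested = true;
        return false;           /* placeholder tossed: caller retries */
}

int main(void)
{
        struct fault_info fi = { B_DEFAULT, 16 * 4096 };
        bool requested = false;

        while (!fault_once(&fi, &requested))
                ;               /* caller re-runs vm_fault_page() */
        printf("fault satisfied after retry\n");
        return 0;
}
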
                        /*
                         * Call the memory manager to retrieve the data.
                         */
@@ -1301,11 +1774,23 @@ vm_fault_page(
                                access_required | wants_copy_flag,
                                (memory_object_fault_info_t)fault_info);
 
+                       if (data_already_requested == TRUE) {
+                               fault_info->behavior = orig_behavior;
+                               fault_info->cluster_size = orig_cluster_size;
+                       } else
+                               data_already_requested = TRUE;
+
+                       DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
 #if TRACEFAULTPAGE
                        dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
 #endif
                        vm_object_lock(object);
 
+                       if (object->object_slid == TRUE) {
+                               clear_thread_rwlock_boost();
+                       }
+
+               data_requested:
                        if (rc != KERN_SUCCESS) {
 
                                vm_fault_cleanup(object, first_m);
@@ -1314,18 +1799,34 @@ vm_fault_page(
                                return ((rc == MACH_SEND_INTERRUPTED) ?
                                        VM_FAULT_INTERRUPTED :
                                        VM_FAULT_MEMORY_ERROR);
+                       } else {
+                               clock_sec_t     tv_sec;
+                               clock_usec_t    tv_usec;
+
+                               if (my_fault_type == DBG_PAGEIN_FAULT) {
+                                       clock_get_system_microtime(&tv_sec, &tv_usec);
+                                       current_thread()->t_page_creation_time = tv_sec;
+                                       current_thread()->t_page_creation_count = 0;
+                               }
                        }
-                       if ((interruptible != THREAD_UNINT) && (current_thread()->sched_mode & TH_MODE_ABORT)) {
+                       if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
 
                                vm_fault_cleanup(object, first_m);
                                thread_interrupt_level(interruptible_state);
 
                                return (VM_FAULT_INTERRUPTED);
                        }
+                       if (force_fault_retry == TRUE) {
+
+                               vm_fault_cleanup(object, first_m);
+                               thread_interrupt_level(interruptible_state);
+
+                               return (VM_FAULT_RETRY);
+                       }
                        if (m == VM_PAGE_NULL && object->phys_contiguous) {
                                /*
                                 * No page here means that the object we
-                                * initially looked up was "physically 
+                                * initially looked up was "physically
                                 * contiguous" (i.e. device memory).  However,
                                 * with Virtual VRAM, the object might not
                                 * be backed by that device memory anymore,
@@ -1336,14 +1837,15 @@ vm_fault_page(
                                 * page fault against the object's new backing
                                 * store (different memory object).
                                 */
-                               break;
+                       phys_contig_object:
+                               goto done;
                        }
                        /*
                         * potentially a pagein fault
                         * if we make it through the state checks
                         * above, then we'll count it as such
                         */
-                       my_fault = DBG_PAGEIN_FAULT;
+                       my_fault = my_fault_type;
 
                        /*
                         * Retry with same object/offset, since new data may
@@ -1352,9 +1854,9 @@ vm_fault_page(
                         */
                        continue;
                }
-
+dont_look_for_page:
                /*
-                * We get here if the object has no pager, or an existence map 
+                * We get here if the object has no pager, or an existence map
                 * exists and indicates the page isn't present on the pager
                 * or we're unwiring a page.  If a pager exists, but there
                 * is no existence map, then the m->absent case above handles
@@ -1370,8 +1872,8 @@ vm_fault_page(
 
                XPR(XPR_VM_FAULT,
                    "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
-                       (integer_t)object, offset, (integer_t)m,
-                       (integer_t)object->shadow, 0);
+                       object, offset, m,
+                       object->shadow, 0);
 
                next_object = object->shadow;
 
@@ -1391,23 +1893,23 @@ vm_fault_page(
                                vm_object_lock(object);
                        }
                        m = first_m;
-                       assert(m->object == object);
+                       assert(VM_PAGE_OBJECT(m) == object);
                        first_m = VM_PAGE_NULL;
 
                        /*
                         * check for any conditions that prevent
                         * us from creating a new zero-fill page
-                        * vm_fault_check will do all of the 
+                        * vm_fault_check will do all of the
                         * fault cleanup in the case of an error condition
                         * including resetting the thread_interrupt_level
                         */
-                       error = vm_fault_check(object, m, first_m, interruptible_state);
+                       error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
 
                        if (error != VM_FAULT_SUCCESS)
                                return (error);
 
                        if (m == VM_PAGE_NULL) {
-                               m = vm_page_grab();
+                               m = vm_page_grab_options(grab_options);
 
                                if (m == VM_PAGE_NULL) {
                                        vm_fault_cleanup(object, VM_PAGE_NULL);
@@ -1417,6 +1919,9 @@ vm_fault_page(
                                }
                                vm_page_insert(m, object, offset);
                        }
+                       if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
+                               m->absent = TRUE;
+
                        my_fault = vm_fault_zero_page(m, no_zero_fill);
 
                        break;
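
The shadow-chain walk that ends in this zero-fill case behaves like the small model below, assuming a hypothetical struct object; the real code additionally juggles locks and paging references at each level.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct object {
        struct object *shadow;
        long           shadow_offset;   /* vo_shadow_offset above */
        bool           has_page;        /* page resident at this level? */
};

/* Walk the shadow chain, sliding the offset down at each level; if
 * nothing backs the offset by the time the chain ends, the fault is
 * satisfied with a zero-filled page. */
static const char *resolve_fault(struct object *obj, long *offset)
{
        for (; obj != NULL; obj = obj->shadow) {
                if (obj->has_page)
                        return "resident page";
                if (obj->shadow == NULL)
                        return "zero-fill page";
                *offset += obj->shadow_offset;
        }
        return "no object";
}

int main(void)
{
        struct object backing = { NULL, 0, false };
        struct object top = { &backing, 0x1000, false };
        long off = 0;

        printf("%s at offset 0x%lx\n", resolve_fault(&top, &off), off);
        return 0;
}
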
@@ -1429,9 +1934,9 @@ vm_fault_page(
                        if ((object != first_object) || must_be_resident)
                                vm_object_paging_end(object);
 
-                       offset += object->shadow_offset;
-                       fault_info->lo_offset += object->shadow_offset;
-                       fault_info->hi_offset += object->shadow_offset;
+                       offset += object->vo_shadow_offset;
+                       fault_info->lo_offset += object->vo_shadow_offset;
+                       fault_info->hi_offset += object->vo_shadow_offset;
                        access_required = VM_PROT_READ;
 
                        vm_object_lock(next_object);
@@ -1464,27 +1969,16 @@ vm_fault_page(
        dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
 #endif
 #if    EXTRA_ASSERTIONS
-       if (m != VM_PAGE_NULL) {
-               assert(m->busy && !m->absent);
-               assert((first_m == VM_PAGE_NULL) ||
-                       (first_m->busy && !first_m->absent &&
-                        !first_m->active && !first_m->inactive));
-       }
+       assert(m->busy && !m->absent);
+       assert((first_m == VM_PAGE_NULL) ||
+              (first_m->busy && !first_m->absent &&
+               !first_m->active && !first_m->inactive && !first_m->secluded));
 #endif /* EXTRA_ASSERTIONS */
 
-       /*
-        * ENCRYPTED SWAP:
-        * If we found a page, we must have decrypted it before we
-        * get here...
-        */
-       if (m != VM_PAGE_NULL) {
-               ASSERT_PAGE_DECRYPTED(m);
-       }
-
        XPR(XPR_VM_FAULT,
            "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
-               (integer_t)object, offset, (integer_t)m,
-               (integer_t)first_object, (integer_t)first_m);
+               object, offset, m,
+               first_object, first_m);
 
        /*
         * If the page is being written, but isn't
@@ -1492,7 +1986,7 @@ vm_fault_page(
         * we have to copy it into a new page owned
         * by the top-level object.
         */
-       if ((object != first_object) && (m != VM_PAGE_NULL)) {
+       if (object != first_object) {
 
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
@@ -1544,7 +2038,7 @@ vm_fault_page(
                        /*
                         * Allocate a page for the copy
                         */
-                       copy_m = vm_page_grab();
+                       copy_m = vm_page_grab_options(grab_options);
 
                        if (copy_m == VM_PAGE_NULL) {
                                RELEASE_PAGE(m);
@@ -1556,8 +2050,8 @@ vm_fault_page(
                        }
                        XPR(XPR_VM_FAULT,
                            "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
-                               (integer_t)object, offset,
-                               (integer_t)m, (integer_t)copy_m, 0);
+                               object, offset,
+                               m, copy_m, 0);
 
                        vm_page_copy(m, copy_m);
 
@@ -1574,14 +2068,29 @@ vm_fault_page(
                         * avoid the pmap_disconnect() call.
                         */
                        if (m->pmapped)
-                               pmap_disconnect(m->phys_page);
+                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 
+                       if (m->clustered) {
+                               VM_PAGE_COUNT_AS_PAGEIN(m);
+                               VM_PAGE_CONSUME_CLUSTERED(m);
+                       }
                        assert(!m->cleaning);
 
                        /*
                         * We no longer need the old page or object.
                         */
-                       PAGE_WAKEUP_DONE(m);
+                       RELEASE_PAGE(m);
+
+                       /*
+                        * This check helps with marking the object as having a sequential pattern.
+                        * Normally we'd miss doing this below because this fault is a COW into
+                        * the first_object: we bring the page in from disk and push it to the
+                        * object above, but never update the file object's sequential pattern.
+                        */
+                       if (object->internal == FALSE) {
+                               vm_fault_is_sequential(object, offset, fault_info->behavior);
+                       }
+
                        vm_object_paging_end(object);
                        vm_object_unlock(object);
 
@@ -1600,14 +2109,14 @@ vm_fault_page(
                         */
                        VM_PAGE_FREE(first_m);
                        first_m = VM_PAGE_NULL;
-                       
+
                        /*
                         * and replace it with the
                         * page we just copied into
                         */
                        assert(copy_m->busy);
                        vm_page_insert(copy_m, object, offset);
-                       copy_m->dirty = TRUE;
+                       SET_PAGE_DIRTY(copy_m, TRUE);
 
                        m = copy_m;
                        /*
@@ -1615,8 +2124,8 @@ vm_fault_page(
                         * way, let's try to collapse the top object.
                         * But we have to play ugly games with
                         * paging_in_progress to do that...
-                        */     
-                       vm_object_paging_end(object); 
+                        */
+                       vm_object_paging_end(object);
                        vm_object_collapse(object, offset, TRUE);
                        vm_object_paging_begin(object);
 
@@ -1632,7 +2141,7 @@ vm_fault_page(
         */
        try_failed_count = 0;
 
-       while ((copy_object = first_object->copy) != VM_OBJECT_NULL && (m != VM_PAGE_NULL)) {
+       while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
                vm_object_offset_t      copy_offset;
                vm_page_t               copy_m;
 
@@ -1680,9 +2189,9 @@ vm_fault_page(
                /*
                 * Does the page exist in the copy?
                 */
-               copy_offset = first_offset - copy_object->shadow_offset;
+               copy_offset = first_offset - copy_object->vo_shadow_offset;
 
-               if (copy_object->size <= copy_offset)
+               if (copy_object->vo_size <= copy_offset)
                        /*
                         * Copy object doesn't cover this page -- do nothing.
                         */
@@ -1713,12 +2222,7 @@ vm_fault_page(
                                copy_object->ref_count--;
                                assert(copy_object->ref_count > 0);
                                copy_m = vm_page_lookup(copy_object, copy_offset);
-                               /*
-                                * ENCRYPTED SWAP:
-                                * it's OK if the "copy_m" page is encrypted,
-                                * because we're not moving it nor handling its
-                                * contents.
-                                */
+
                                if (copy_m != VM_PAGE_NULL && copy_m->busy) {
                                        PAGE_ASSERT_WAIT(copy_m, interruptible);
 
@@ -1793,7 +2297,7 @@ vm_fault_page(
                         * Must copy page into copy-object.
                         */
                        vm_page_copy(m, copy_m);
-                       
+
                        /*
                         * If the old page was in use by any users
                         * of the copy-object, it must be removed
@@ -1801,35 +2305,38 @@ vm_fault_page(
                         * pmaps use it.)
                         */
                        if (m->pmapped)
-                               pmap_disconnect(m->phys_page);
+                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 
+                       if (m->clustered) {
+                               VM_PAGE_COUNT_AS_PAGEIN(m);
+                               VM_PAGE_CONSUME_CLUSTERED(m);
+                       }
                        /*
                         * If there's a pager, then immediately
                         * page out this page, using the "initialize"
                         * option.  Else, we use the copy.
                         */
-                       if ((!copy_object->pager_created)
-#if MACH_PAGEMAP
-                           || vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT
-#endif
-                           ) {
+                       if ((!copy_object->pager_ready)
+                           || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
+                          ) {
 
                                vm_page_lockspin_queues();
                                assert(!m->cleaning);
                                vm_page_activate(copy_m);
                                vm_page_unlock_queues();
 
-                               copy_m->dirty = TRUE;
+                               SET_PAGE_DIRTY(copy_m, TRUE);
                                PAGE_WAKEUP_DONE(copy_m);
-                       } 
-                       else {
+
+                       } else {
+
                                assert(copy_m->busy == TRUE);
                                assert(!m->cleaning);
 
                                /*
                                 * dirty is protected by the object lock
                                 */
-                               copy_m->dirty = TRUE;
+                               SET_PAGE_DIRTY(copy_m, TRUE);
 
                                /*
                                 * The page is already ready for pageout:
@@ -1867,6 +2374,7 @@ vm_fault_page(
                                 */
                                vm_object_lock(object);
                        }
+
                        /*
                         * Because we're pushing a page upward
                         * in the object tree, we must restart
@@ -1892,31 +2400,33 @@ vm_fault_page(
                copy_object->ref_count--;
                assert(copy_object->ref_count > 0);
 
-               VM_OBJ_RES_DECR(copy_object);   
+               VM_OBJ_RES_DECR(copy_object);
                vm_object_unlock(copy_object);
 
                break;
        }
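
The loop above reduces to a small decision per copy object. Here is a sketch of that core, assuming a hypothetical struct copy_object; the locking, busy-page waits, and pageout path are elided.

#include <stdbool.h>
#include <stdio.h>

struct copy_object {
        long size, shadow_offset;       /* vo_size / vo_shadow_offset */
        bool page_resident;             /* copy already has its own page? */
};

/* Decide what the fault path must do for the copy object: nothing if
 * the offset is outside it or it already holds its own copy; otherwise
 * push a fresh copy of the page up before letting the write proceed. */
static const char *copy_object_action(const struct copy_object *c,
                                      long first_offset)
{
        long copy_offset = first_offset - c->shadow_offset;

        if (copy_offset < 0 || copy_offset >= c->size)
                return "page not covered: do nothing";
        if (c->page_resident)
                return "copy already made: do nothing";
        return "copy page into copy object, mark dirty";
}

int main(void)
{
        struct copy_object c = { .size = 0x10000, .shadow_offset = 0 };

        printf("%s\n", copy_object_action(&c, 0x2000));
        return 0;
}
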
+
+done:
        *result_page = m;
        *top_page = first_m;
 
        XPR(XPR_VM_FAULT,
                "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
-               (integer_t)object, offset, (integer_t)m, (integer_t)first_m, 0);
+               object, offset, m, first_m, 0);
 
        if (m != VM_PAGE_NULL) {
+               assert(VM_PAGE_OBJECT(m) == object);
+
+               retval = VM_FAULT_SUCCESS;
+
                if (my_fault == DBG_PAGEIN_FAULT) {
 
-                       VM_STAT_INCR(pageins);
-                       DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);
-                       DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
-                       current_task()->pageins++;
+                       VM_PAGE_COUNT_AS_PAGEIN(m);
 
-                       if (m->object->internal) {
-                               DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);
-                       } else {
-                               DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);
-                       }
+                       if (object->internal)
+                               my_fault = DBG_PAGEIND_FAULT;
+                       else
+                               my_fault = DBG_PAGEINV_FAULT;
 
                        /*
                         * evaluate access pattern and update state
@@ -1926,18 +2436,24 @@ vm_fault_page(
                        vm_fault_is_sequential(object, offset, fault_info->behavior);
 
                        vm_fault_deactivate_behind(object, offset, fault_info->behavior);
+               } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
+
+                       VM_STAT_INCR(decompressions);
                }
                if (type_of_fault)
                        *type_of_fault = my_fault;
-       } else
-               vm_object_unlock(object);
+       } else {
+               retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
+               assert(first_m == VM_PAGE_NULL);
+               assert(object == first_object);
+       }
 
        thread_interrupt_level(interruptible_state);
 
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);       /* (TEST/DEBUG) */
 #endif
-       return (VM_FAULT_SUCCESS);
+       return retval;
 
 backoff:
        thread_interrupt_level(interruptible_state);
@@ -1951,6 +2467,21 @@ backoff:
 
 
 
+/*
+ * CODE SIGNING:
+ * When soft faulting a page, we have to validate the page if:
+ * 1. the page is being mapped in user space
+ * 2. the page hasn't already been found to be "tainted"
+ * 3. the page belongs to a code-signed object
+ * 4. the page has not been validated yet or has been mapped for write.
+ */
+#define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj)              \
+       ((pmap) != kernel_pmap /*1*/ &&                                 \
+        !(page)->cs_tainted /*2*/ &&                                   \
+        (page_obj)->code_signed /*3*/ &&                                       \
+        (!(page)->cs_validated || (page)->wpmapped /*4*/))
+
+
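
Spelled out as a function, the macro's four tests read as below. This is a user-space model with a hypothetical struct page standing in for vm_page_t and its object, not the kernel interface.

#include <stdbool.h>
#include <stdio.h>

struct page {
        bool cs_tainted, cs_validated, wpmapped;
        bool object_code_signed;
        bool mapped_by_kernel_pmap;
};

/* Same four tests as VM_FAULT_NEED_CS_VALIDATION, one per line. */
static bool need_cs_validation(const struct page *p)
{
        return !p->mapped_by_kernel_pmap &&          /* 1: user mapping  */
               !p->cs_tainted &&                     /* 2: not tainted   */
               p->object_code_signed &&              /* 3: signed object */
               (!p->cs_validated || p->wpmapped);    /* 4: stale/unknown */
}

int main(void)
{
        /* a signed page previously mapped writable must be re-validated */
        struct page p = { .object_code_signed = true,
                          .cs_validated = true, .wpmapped = true };

        printf("revalidate: %d\n", need_cs_validation(&p));
        return 0;
}
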
 /*
  * page queue lock must NOT be held
  * m->object must be locked
@@ -1960,6 +2491,9 @@ backoff:
  * careful not to modify the VM object in any way that is not
  * legal under a shared lock...
  */
+extern int panic_on_cs_killed;
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
 unsigned long cs_enter_tainted_rejected = 0;
 unsigned long cs_enter_tainted_accepted = 0;
 kern_return_t
@@ -1967,79 +2501,87 @@ vm_fault_enter(vm_page_t m,
               pmap_t pmap,
               vm_map_offset_t vaddr,
               vm_prot_t prot,
+              vm_prot_t caller_prot,
               boolean_t wired,
               boolean_t change_wiring,
+              vm_tag_t  wire_tag,
               boolean_t no_cache,
+              boolean_t cs_bypass,
+              __unused int      user_tag,
+              int       pmap_options,
+              boolean_t *need_retry,
               int *type_of_fault)
 {
-       unsigned int    cache_attr;
-       kern_return_t   kr;
+       kern_return_t   kr, pe_result;
        boolean_t       previously_pmapped = m->pmapped;
+       boolean_t       must_disconnect = 0;
+       boolean_t       map_is_switched, map_is_switch_protected;
+       int             cs_enforcement_enabled;
+       vm_prot_t       fault_type;
+       vm_object_t     object;
+
+       fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
+       object = VM_PAGE_OBJECT(m);
+
+       vm_object_lock_assert_held(object);
 
-       vm_object_lock_assert_held(m->object);
-#if DEBUG
-       mutex_assert(&vm_page_queue_lock, MA_NOTOWNED);
-#endif /* DEBUG */
+#if KASAN
+       if (pmap == kernel_pmap) {
+               kasan_notify_address(vaddr, PAGE_SIZE);
+       }
+#endif
 
-       if (m->phys_page == vm_page_guard_addr) {
+       LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+
+       if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
                assert(m->fictitious);
                return KERN_SUCCESS;
        }
 
-        cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+       if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
+
+               vm_object_lock_assert_exclusive(object);
 
-       if (m->object->code_signed && !m->cs_validated &&
-           pmap != kernel_pmap) {
+       } else if ((fault_type & VM_PROT_WRITE) == 0 && !m->wpmapped) {
                /*
-                * CODE SIGNING:
-                * This page comes from a VM object backed by a
-                * signed memory object and it hasn't been validated yet.
-                * We're about to enter it into a process address space,
-                * so we need to validate its signature now.
+                * This is not a "write" fault, so we
+                * might not have taken the object lock
+                * exclusively and we might not be able
+                * to update the "wpmapped" bit in
+                * vm_fault_enter().
+                * Let's just grant read access to
+                * the page for now and we'll
+                * soft-fault again if we need write
+                * access later...
                 */
-               vm_object_lock_assert_exclusive(m->object);
 
-               /* VM map still locked, so 1 ref will remain on VM object */
-
-               vm_page_validate_cs(m);
+               /* This had better not be a JIT page. */
+               if (!pmap_has_prot_policy(prot)) {
+                       prot &= ~VM_PROT_WRITE;
+               } else {
+                       assert(cs_bypass);
+               }
        }
-
        if (m->pmapped == FALSE) {
-               /*
-                * This is the first time this page is being
-                * mapped in an address space (pmapped == FALSE).
-                *
-                * Part of that page may still be in the data cache
-                * and not flushed to memory.  In case we end up
-                * accessing that page via the instruction cache,
-                * we need to ensure that the 2 caches are in sync.
-                */
-               pmap_sync_page_data_phys(m->phys_page);
 
-               if ((*type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
-                       /*
-                        * found it in the cache, but this
-                        * is the first fault-in of the page (m->pmapped == FALSE)
-                        * so it must have come in as part of
-                        * a cluster... account 1 pagein against it
-                        */
-                       VM_STAT_INCR(pageins);
-                       DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);
+               if (m->clustered) {
+                       if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
+                               /*
+                                * found it in the cache, but this
+                                * is the first fault-in of the page (m->pmapped == FALSE)
+                                * so it must have come in as part of
+                                * a cluster... account 1 pagein against it
+                                */
+                               if (object->internal)
+                                       *type_of_fault = DBG_PAGEIND_FAULT;
+                               else
+                                       *type_of_fault = DBG_PAGEINV_FAULT;
 
-                       if (m->object->internal) {
-                               DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);
-                       } else {
-                               DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);
+                               VM_PAGE_COUNT_AS_PAGEIN(m);
                        }
-
-                       current_task()->pageins++;
-
-                       *type_of_fault = DBG_PAGEIN_FAULT;
+                       VM_PAGE_CONSUME_CLUSTERED(m);
                }
-               VM_PAGE_CONSUME_CLUSTERED(m);
-
-       } else if (cache_attr != VM_WIMG_DEFAULT)
-               pmap_sync_page_attributes_phys(m->phys_page);
+       }
 
        if (*type_of_fault != DBG_COW_FAULT) {
                DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
@@ -2049,107 +2591,780 @@ vm_fault_enter(vm_page_t m,
                }
        }
 
-       if (m->cs_tainted) {
+       /* Validate code signature if necessary. */
+       if (VM_FAULT_NEED_CS_VALIDATION(pmap, m, object)) {
+               vm_object_lock_assert_exclusive(object);
+
+               if (m->cs_validated) {
+                       vm_cs_revalidates++;
+               }
+
+               /* VM map is locked, so 1 ref will remain on VM object -
+                * so no harm if vm_page_validate_cs drops the object lock */
+               vm_page_validate_cs(m);
+       }
+
+#define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
+#define page_nx(m) ((m)->cs_nx)
+
+       map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
+                          (pmap == vm_map_pmap(current_thread()->map)));
+       map_is_switch_protected = current_thread()->map->switch_protect;
+
+       /* If the map is switched, and is switch-protected, we must protect
+        * some pages from being write-faulted: immutable pages because by
+        * definition they may not be written, and executable pages because that
+        * would provide a way to inject unsigned code.
+        * If the page is immutable, we can simply return. We can't
+        * immediately determine whether a page is executable anywhere, but
+        * we can disconnect it everywhere and remove the executable protection
+        * from the current map. We do that below, right before we do the
+        * PMAP_ENTER.
+        */
+       cs_enforcement_enabled = cs_enforcement(NULL);
+
+       if (cs_enforcement_enabled && map_is_switched &&
+          map_is_switch_protected && page_immutable(m, prot) &&
+          (prot & VM_PROT_WRITE))
+       {
+               return KERN_CODESIGN_ERROR;
+       }
+
+       if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) {
+               if (cs_debug)
+                       printf("page marked to be NX, not letting it be mapped EXEC\n");
+               return KERN_CODESIGN_ERROR;
+       }
+
+       if (cs_enforcement_enabled &&
+           !m->cs_validated &&
+           (prot & VM_PROT_EXECUTE) &&
+           !(caller_prot & VM_PROT_EXECUTE)) {
                /*
-                * CODE SIGNING:
-                * This page has been tainted and can not be trusted.
-                * Let's notify the current process and let it take any
-                * necessary precautions before we enter the tainted page
-                * into its address space.
+                * FOURK PAGER:
+                * This page has not been validated and will not be
+                * allowed to be mapped for "execute".
+                * But the caller did not request "execute" access for this
+                * fault, so we should not raise a code-signing violation
+                * (and possibly kill the process) below.
+                * Instead, let's just remove the "execute" access request.
+                *
+                * This can happen on devices with a 4K page size if a 16K
+                * page contains a mix of signed&executable and
+                * unsigned&non-executable 4K pages, making the whole 16K
+                * mapping "executable".
                 */
-               if (cs_invalid_page()) {
-                       /* reject the tainted page: abort the page fault */
-                       kr = KERN_MEMORY_ERROR;
+               if (!pmap_has_prot_policy(prot)) {
+                       prot &= ~VM_PROT_EXECUTE;
+               } else {
+                       assert(cs_bypass);
+               }
+       }
+
+       /* A page could be tainted, or pose a risk of being tainted later.
+        * Check whether the receiving process wants it, and make it feel
+        * the consequences (that happens in cs_invalid_page()).
+        * For CS Enforcement, two other conditions will
+        * cause that page to be tainted as well:
+        * - pmapping an unsigned page executable - this means unsigned code;
+        * - a writable mapping of a validated page - the content of that page
+        *   can be changed without the kernel noticing, and therefore unsigned
+        *   code can be created.
+        */
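
The test below then reduces to: an explicit bypass skips it, an already-tainted page always trips it, and with enforcement on, either of the two conditions above trips it too. A self-contained model follows; page_is_tainted() and its parameters are hypothetical, while the field names mirror the page bits used here.

#include <stdbool.h>
#include <stdio.h>

struct page {
        bool cs_tainted, cs_validated, wpmapped;
};

/* Model of the taint decision: note that page_immutable() above boils
 * down to cs_validated, so "immutable page in danger of modification"
 * means a validated page faulted for write or already writably mapped. */
static bool page_is_tainted(const struct page *m, bool cs_bypass,
                            bool enforcement, bool want_exec, bool want_write)
{
        if (cs_bypass)
                return false;
        if (m->cs_tainted)
                return true;
        if (!enforcement)
                return false;
        if (!m->cs_validated && want_exec)
                return true;            /* unsigned code */
        if (m->cs_validated && (want_write || m->wpmapped))
                return true;            /* mutable "immutable" page */
        return false;
}

int main(void)
{
        /* an unsigned page mapped executable under enforcement */
        struct page m = { .cs_validated = false };

        printf("%d\n", page_is_tainted(&m, false, true, true, false));
        return 0;
}
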
+       if (!cs_bypass &&
+           (m->cs_tainted ||
+            (cs_enforcement_enabled &&
+             (/* The page is unsigned and wants to be executable */
+              (!m->cs_validated && (prot & VM_PROT_EXECUTE))  ||
+              /* The page should be immutable, but is in danger of being modified
+               * This is the case where we want policy from the code directory -
+               * is the page immutable or not? For now we have to assume that
+               * code pages will be immutable, data pages not.
+               * We'll assume a page is a code page if it has a code directory
+               * and we fault for execution.
+               * That is good enough since if we faulted the code page for
+               * writing in another map before, it is wpmapped; if we fault
+               * it for writing in this map later it will also be faulted for executing
+               * at the same time; and if we fault for writing in another map
+               * later, we will disconnect it from this pmap so we'll notice
+               * the change.
+               */
+             (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped))
+             ))
+                   ))
+       {
+               /* We will have a tainted page. Have to handle the special case
+                * of a switched map now. If the map is not switched, standard
+                * procedure applies - call cs_invalid_page().
+                * If the map is switched, the real owner is invalid already.
+                * There is no point in invalidating the switching process since
+                * it will not be executing from the map. So we don't call
+                * cs_invalid_page() in that case. */
+               boolean_t reject_page, cs_killed;
+               if (map_is_switched) {
+                       assert(pmap == vm_map_pmap(current_thread()->map));
+                       assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
+                       reject_page = FALSE;
+               } else {
+                       if (cs_debug > 5)
+                               printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n",
+                                      object->code_signed ? "yes" : "no",
+                                      m->cs_validated ? "yes" : "no",
+                                      m->cs_tainted ? "yes" : "no",
+                                      m->wpmapped ? "yes" : "no",
+                                      m->slid ? "yes" : "no",
+                                      (int)prot);
+                       reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
+               }
+
+               if (reject_page) {
+                       /* reject the invalid page: abort the page fault */
+                       int                     pid;
+                       const char              *procname;
+                       task_t                  task;
+                       vm_object_t             file_object, shadow;
+                       vm_object_offset_t      file_offset;
+                       char                    *pathname, *filename;
+                       vm_size_t               pathname_len, filename_len;
+                       boolean_t               truncated_path;
+#define __PATH_MAX 1024
+                       struct timespec         mtime, cs_mtime;
+                       int                     shadow_depth;
+                       os_reason_t             codesigning_exit_reason = OS_REASON_NULL;
+
+                       kr = KERN_CODESIGN_ERROR;
                        cs_enter_tainted_rejected++;
+
+                       /* get process name and pid */
+                       procname = "?";
+                       task = current_task();
+                       pid = proc_selfpid();
+                       if (task->bsd_info != NULL)
+                               procname = proc_name_address(task->bsd_info);
+
+                       /* get file's VM object */
+                       file_object = object;
+                       file_offset = m->offset;
+                       for (shadow = file_object->shadow,
+                                    shadow_depth = 0;
+                            shadow != VM_OBJECT_NULL;
+                            shadow = file_object->shadow,
+                               shadow_depth++) {
+                               vm_object_lock_shared(shadow);
+                               if (file_object != object) {
+                                       vm_object_unlock(file_object);
+                               }
+                               file_offset += file_object->vo_shadow_offset;
+                               file_object = shadow;
+                       }
+
+                       mtime.tv_sec = 0;
+                       mtime.tv_nsec = 0;
+                       cs_mtime.tv_sec = 0;
+                       cs_mtime.tv_nsec = 0;
+
+                       /* get file's pathname and/or filename */
+                       pathname = NULL;
+                       filename = NULL;
+                       pathname_len = 0;
+                       filename_len = 0;
+                       truncated_path = FALSE;
+                       /* no pager -> no file -> no pathname, use "<nil>" in that case */
+                       if (file_object->pager != NULL) {
+                               pathname = (char *)kalloc(__PATH_MAX * 2);
+                               if (pathname) {
+                                       pathname[0] = '\0';
+                                       pathname_len = __PATH_MAX;
+                                       filename = pathname + pathname_len;
+                                       filename_len = __PATH_MAX;
+                               }
+                               vnode_pager_get_object_name(file_object->pager,
+                                                           pathname,
+                                                           pathname_len,
+                                                           filename,
+                                                           filename_len,
+                                                           &truncated_path);
+                               if (pathname) {
+                                       /* safety first... */
+                                       pathname[__PATH_MAX-1] = '\0';
+                                       filename[__PATH_MAX-1] = '\0';
+                               }
+                               vnode_pager_get_object_mtime(file_object->pager,
+                                                            &mtime,
+                                                            &cs_mtime);
+                       }
+                       printf("CODE SIGNING: process %d[%s]: "
+                              "rejecting invalid page at address 0x%llx "
+                              "from offset 0x%llx in file \"%s%s%s\" "
+                              "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+                              "(signed:%d validated:%d tainted:%d nx:%d "
+                              "wpmapped:%d slid:%d dirty:%d depth:%d)\n",
+                              pid, procname, (addr64_t) vaddr,
+                              file_offset,
+                              (pathname ? pathname : "<nil>"),
+                              (truncated_path ? "/.../" : ""),
+                              (truncated_path ? filename : ""),
+                              cs_mtime.tv_sec, cs_mtime.tv_nsec,
+                              ((cs_mtime.tv_sec == mtime.tv_sec &&
+                                cs_mtime.tv_nsec == mtime.tv_nsec)
+                               ? "=="
+                               : "!="),
+                              mtime.tv_sec, mtime.tv_nsec,
+                              object->code_signed,
+                              m->cs_validated,
+                              m->cs_tainted,
+                              m->cs_nx,
+                              m->wpmapped,
+                              m->slid,
+                              m->dirty,
+                              shadow_depth);
+
+                       /*
+                        * We currently only generate an exit reason if cs_invalid_page directly killed the process.
+                        * If cs_invalid_page did not kill it (more common on desktop), vm_fault_enter will not
+                        * satisfy the fault, and whether the process dies then depends on whether a signal handler
+                        * is registered for SIGSEGV and on how that handler deals with the segmentation fault.
+                        */
+                       if (cs_killed) {
+                               KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
+                                                               pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0);
+
+                               codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
+                               if (codesigning_exit_reason == NULL) {
+                                       printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
+                               } else {
+                                       mach_vm_address_t data_addr = 0;
+                                       struct codesigning_exit_reason_info *ceri = NULL;
+                                       uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
+
+                                       if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
+                                               printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
+                                       } else {
+                                               if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
+                                                               EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
+                                                       ceri = (struct codesigning_exit_reason_info *)data_addr;
+                                                       static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
+
+                                                       ceri->ceri_virt_addr = vaddr;
+                                                       ceri->ceri_file_offset = file_offset;
+                                                       if (pathname)
+                                                               strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
+                                                       else
+                                                               ceri->ceri_pathname[0] = '\0';
+                                                       if (filename)
+                                                               strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
+                                                       else
+                                                               ceri->ceri_filename[0] = '\0';
+                                                       ceri->ceri_path_truncated = (truncated_path);
+                                                       ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
+                                                       ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
+                                                       ceri->ceri_page_modtime_secs = mtime.tv_sec;
+                                                       ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
+                                                       ceri->ceri_object_codesigned = (object->code_signed);
+                                                       ceri->ceri_page_codesig_validated = (m->cs_validated);
+                                                       ceri->ceri_page_codesig_tainted = (m->cs_tainted);
+                                                       ceri->ceri_page_codesig_nx = (m->cs_nx);
+                                                       ceri->ceri_page_wpmapped = (m->wpmapped);
+                                                       ceri->ceri_page_slid = (m->slid);
+                                                       ceri->ceri_page_dirty = (m->dirty);
+                                                       ceri->ceri_page_shadow_depth = shadow_depth;
+                                               } else {
+#if DEBUG || DEVELOPMENT
+                                                       panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
+#else
+                                                       printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
+#endif /* DEBUG || DEVELOPMENT */
+                                                       /* Free the buffer (a zero-size request releases it) */
+                                                       os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
+                                               }
+                                       }
+                               }
+
+                               set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
+                       }
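The block above follows the usual kcdata shape: estimate the buffer size, allocate it, carve a typed record out of the buffer, then fill the record field by field, releasing the buffer if the carve fails. A minimal user-space model of that shape — every name below is a hypothetical stand-in, not the kernel's kcdata API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for struct codesigning_exit_reason_info */
struct fault_record {
	uint64_t virt_addr;
	char     pathname[64];
};

/* carve a typed record out of a pre-sized buffer; NULL on overflow */
static void *
buffer_get_record(char *buf, size_t bufsize, size_t *used, size_t reclen)
{
	if (*used + reclen > bufsize)
		return NULL;
	void *rec = buf + *used;
	*used += reclen;
	return rec;
}

int
main(void)
{
	size_t estimate = sizeof(struct fault_record) + 16; /* crude estimate */
	size_t used = 0;
	char *buf = malloc(estimate);

	if (buf == NULL)
		return 1;

	struct fault_record *fr = buffer_get_record(buf, estimate, &used, sizeof(*fr));
	if (fr == NULL) {
		free(buf);   /* mirror the kernel's fallback: release the buffer */
		return 1;
	}
	fr->virt_addr = 0x100000000ULL;
	snprintf(fr->pathname, sizeof(fr->pathname), "%s", "/usr/bin/example");
	printf("record at +%zu: addr 0x%llx path %s\n",
	    (size_t)((char *)fr - buf),
	    (unsigned long long)fr->virt_addr, fr->pathname);
	free(buf);
	return 0;
}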
+                       if (panic_on_cs_killed &&
+                           object->object_slid) {
+                               panic("CODE SIGNING: process %d[%s]: "
+                                     "rejecting invalid page at address 0x%llx "
+                                     "from offset 0x%llx in file \"%s%s%s\" "
+                                     "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+                                     "(signed:%d validated:%d tainted:%d nx:%d "
+                                     "wpmapped:%d slid:%d dirty:%d depth:%d)\n",
+                                     pid, procname, (addr64_t) vaddr,
+                                     file_offset,
+                                     (pathname ? pathname : "<nil>"),
+                                     (truncated_path ? "/.../" : ""),
+                                     (truncated_path ? filename : ""),
+                                     cs_mtime.tv_sec, cs_mtime.tv_nsec,
+                                     ((cs_mtime.tv_sec == mtime.tv_sec &&
+                                       cs_mtime.tv_nsec == mtime.tv_nsec)
+                                      ? "=="
+                                      : "!="),
+                                     mtime.tv_sec, mtime.tv_nsec,
+                                     object->code_signed,
+                                     m->cs_validated,
+                                     m->cs_tainted,
+                                     m->cs_nx,
+                                     m->wpmapped,
+                                     m->slid,
+                                     m->dirty,
+                                     shadow_depth);
+                       }
+
+                       if (file_object != object) {
+                               vm_object_unlock(file_object);
+                       }
+                       if (pathname_len != 0) {
+                               kfree(pathname, __PATH_MAX * 2);
+                               pathname = NULL;
+                               filename = NULL;
+                       }
                } else {
-                       /* proceed with the tainted page */
+                       /* proceed with the invalid page */
                        kr = KERN_SUCCESS;
+                       if (!m->cs_validated &&
+                           !object->code_signed) {
+                               /*
+                                * This page has not been (fully) validated but
+                                * does not belong to a code-signed object
+                                * so it should not be forcefully considered
+                                * as tainted.
+                                * We're just concerned about it here because
+                                * we've been asked to "execute" it but that
+                                * does not mean that it should cause other
+                                * accesses to fail.
+                                * This happens when a debugger sets a
+                                * breakpoint and we then execute code in
+                                * that page.  Marking the page as "tainted"
+                                * would cause any inspection tool ("leaks",
+                                * "vmmap", "CrashReporter", ...) to get killed
+                                * due to code-signing violation on that page,
+                                * even though they're just reading it and not
+                                * executing from it.
+                                */
+                       } else {
+                               /*
+                                * Page might have been tainted before or not;
+                                * now it definitively is. If the page wasn't
+                                * tainted, we must disconnect it from all
+                                * pmaps later, to force existing mappings
+                                * through that code path for re-consideration
+                                * of the validity of that page.
+                                */
+                               must_disconnect = !m->cs_tainted;
+                               m->cs_tainted = TRUE;
+                       }
                        cs_enter_tainted_accepted++;
                }
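The accepted-taint branch above is edge-triggered: only the first untainted-to-tainted transition requires disconnecting existing mappings, since later faults already see cs_tainted set. A tiny model of that transition test (illustrative names, plain C):

#include <stdbool.h>
#include <stdio.h>

struct page { bool tainted; };

/* returns true only on the untainted -> tainted edge */
static bool
mark_tainted(struct page *p)
{
	bool first_time = !p->tainted;
	p->tainted = true;
	return first_time;   /* caller disconnects mappings only when true */
}

int
main(void)
{
	struct page p = { false };
	printf("%d\n", mark_tainted(&p));   /* 1: disconnect existing pmaps */
	printf("%d\n", mark_tainted(&p));   /* 0: nothing more to do */
	return 0;
}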
-               if (cs_debug || kr != KERN_SUCCESS) {
-                       printf("CODESIGNING: vm_fault_enter(0x%llx): "
-                              "page %p obj %p off 0x%llx *** TAINTED ***\n",
-                              (long long)vaddr, m, m->object, m->offset);
+               if (kr != KERN_SUCCESS) {
+                       if (cs_debug) {
+                               printf("CODESIGNING: vm_fault_enter(0x%llx): "
+                                      "*** INVALID PAGE ***\n",
+                                      (long long)vaddr);
+                       }
+#if !SECURE_KERNEL
+                       if (cs_enforcement_panic) {
+                               panic("CODESIGNING: panicking on invalid page\n");
+                       }
+#endif
                }
+
        } else {
                /* proceed with the valid page */
                kr = KERN_SUCCESS;
        }
 
-       if (kr == KERN_SUCCESS) {
-               /*
-                * NOTE: we may only hold the vm_object lock SHARED
-                * at this point, but the update of pmapped is ok
-                * since this is the ONLY bit updated behind the SHARED
-                * lock... however, we need to figure out how to do an atomic
-                * update on a bit field to make this less fragile... right
-                * now I don'w know how to coerce 'C' to give me the offset info
-                * that's needed for an AtomicCompareAndSwap
-                */
-               m->pmapped = TRUE;
-
-               PMAP_ENTER(pmap, vaddr, m, prot, cache_attr, wired);
-       }
+       boolean_t       page_queues_locked = FALSE;
+#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()  \
+MACRO_BEGIN                                    \
+       if (! page_queues_locked) {             \
+               page_queues_locked = TRUE;      \
+               vm_page_lockspin_queues();      \
+       }                                       \
+MACRO_END
+#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()    \
+MACRO_BEGIN                                    \
+       if (page_queues_locked) {               \
+               page_queues_locked = FALSE;     \
+               vm_page_unlock_queues();        \
+       }                                       \
+MACRO_END
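These two macros give the function a take-at-most-once lock discipline: whichever path needs the page-queues lock first takes it, and a single flag guarantees exactly one unlock at the end. A sketch of the same idiom in user-space C, assuming a pthread mutex in place of the page-queues lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;

static void
do_work(bool need_lock_a, bool need_lock_b)
{
	bool locked = false;

	/* first path that needs the lock takes it */
	if (need_lock_a) {
		if (!locked) { locked = true; pthread_mutex_lock(&queues_lock); }
		puts("path A under lock");
	}
	if (need_lock_b) {
		if (!locked) { locked = true; pthread_mutex_lock(&queues_lock); }
		puts("path B under lock");
	}

	/* single unlock point, taken only if some path locked */
	if (locked) { locked = false; pthread_mutex_unlock(&queues_lock); }
}

int
main(void)
{
	do_work(true, true);   /* lock taken once, not twice */
	do_work(false, false); /* lock never taken */
	return 0;
}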
 
        /*
         * Hold queues lock to manipulate
         * the page queues.  Change wiring
         * case is obvious.
         */
-       if (change_wiring) {
-               vm_page_lockspin_queues();
+       assert((m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
+
+#if CONFIG_BACKGROUND_QUEUE
+       vm_page_update_background_state(m);
+#endif
+       if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+               /*
+                * Compressor pages are neither wired
+                * nor pageable and should never change.
+                */
+               assert(object == compressor_object);
+       } else if (change_wiring) {
+               __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
 
                if (wired) {
                        if (kr == KERN_SUCCESS) {
-                               vm_page_wire(m);
+                               vm_page_wire(m, wire_tag, TRUE);
                        }
                } else {
-                       vm_page_unwire(m);
+                       vm_page_unwire(m, TRUE);
                }
-               vm_page_unlock_queues();
+               /* we keep the page queues lock, if we need it later */
 
        } else {
+               if (object->internal == TRUE) {
+                       /*
+                        * don't allow anonymous pages on
+                        * the speculative queues
+                        */
+                       no_cache = FALSE;
+               }
                if (kr != KERN_SUCCESS) {
-                       vm_page_lock_queues();
+                       __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
                        vm_page_deactivate(m);
-                       vm_page_unlock_queues();
-               } else {
-                       if (((!m->active && !m->inactive) || no_cache) && !m->wire_count && !m->throttled) {
-                               vm_page_lockspin_queues();
+                       /* we keep the page queues lock, if we need it later */
+               } else if (((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
+                           (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
+                           (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
+                           ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
+                          !VM_PAGE_WIRED(m)) {
+
+                       if (vm_page_local_q &&
+                           (*type_of_fault == DBG_COW_FAULT ||
+                            *type_of_fault == DBG_ZERO_FILL_FAULT) ) {
+                               struct vpl      *lq;
+                               uint32_t        lid;
+
+                               assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+
+                               __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+                               vm_object_lock_assert_exclusive(object);
+
                                /*
-                                * test again now that we hold the page queue lock
+                                * we got a local queue to stuff this
+                                * new page on...
+                                * it's safe to manipulate local and
+                                * local_id at this point since we're
+                                * behind an exclusive object lock and
+                                * the page is not on any global queue.
+                                *
+                                * we'll use the current cpu number to
+                                * select the queue... note that we don't
+                                * need to disable preemption... we're
+                                * going to be behind the local queue's
+                                * lock to do the real work
                                 */
-                               if (((!m->active && !m->inactive) || no_cache) && !m->wire_count) {
+                               lid = cpu_number();
 
-                                       /*
-                                        * If this is a no_cache mapping and the page has never been
-                                        * mapped before or was previously a no_cache page, then we
-                                        * want to leave pages in the speculative state so that they
-                                        * can be readily recycled if free memory runs low.  Otherwise
-                                        * the page is activated as normal. 
-                                        */
+                               lq = &vm_page_local_q[lid].vpl_un.vpl;
 
-                                       if (no_cache && (!previously_pmapped || m->no_cache)) {
-                                               m->no_cache = TRUE;
+                               VPL_LOCK(&lq->vpl_lock);
 
-                                               if (m->active || m->inactive)
-                                                       VM_PAGE_QUEUES_REMOVE(m);
+                               vm_page_check_pageable_safe(m);
+                               vm_page_queue_enter(&lq->vpl_queue, m,
+                                                   vm_page_t, pageq);
+                               m->vm_page_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
+                               m->local_id = lid;
+                               lq->vpl_count++;
 
-                                               if (!m->speculative) 
-                                                       vm_page_speculate(m, TRUE);
+                               if (object->internal)
+                                       lq->vpl_internal_count++;
+                               else
+                                       lq->vpl_external_count++;
 
-                                       } else if (!m->active && !m->inactive)
-                                               vm_page_activate(m);
+                               VPL_UNLOCK(&lq->vpl_lock);
 
+                               if (lq->vpl_count > vm_page_local_q_soft_limit) {
+                                       /*
+                                        * we're beyond the soft limit
+                                        * for the local queue
+                                        * vm_page_reactivate_local will
+                                        * 'try' to take the global page
+                                        * queue lock... if it can't
+                                        * that's ok... we'll let the
+                                        * queue continue to grow up
+                                        * to the hard limit... at that
+                                        * point we'll wait for the
+                                        * lock... once we've got the
+                                        * lock, we'll transfer all of
+                                        * the pages from the local
+                                        * queue to the global active
+                                        * queue
+                                        */
+                                       vm_page_reactivate_local(lid, FALSE, FALSE);
                                }
+                       } else {
 
-                               vm_page_unlock_queues();
-                       }
-               }
-       }
+                               __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+                               /*
+                                * test again now that we hold the
+                                * page queue lock
+                                */
+                               if (!VM_PAGE_WIRED(m)) {
+                                       if (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                                               vm_page_queues_remove(m, FALSE);
+
+                                               vm_pageout_cleaned_reactivated++;
+                                               vm_pageout_cleaned_fault_reactivated++;
+                                       }
+
+                                       if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
+                                            no_cache) {
+                                               /*
+                                                * If this is a no_cache mapping
+                                                * and the page has never been
+                                                * mapped before or was
+                                                * previously a no_cache page,
+                                                * then we want to leave pages
+                                                * in the speculative state so
+                                                * that they can be readily
+                                                * recycled if free memory runs
+                                                * low.  Otherwise the page is
+                                                * activated as normal.
+                                                */
+
+                                               if (no_cache &&
+                                                   (!previously_pmapped ||
+                                                    m->no_cache)) {
+                                                       m->no_cache = TRUE;
+
+                                                       if (m->vm_page_q_state != VM_PAGE_ON_SPECULATIVE_Q)
+                                                               vm_page_speculate(m, FALSE);
+
+                                               } else if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
+                                                       vm_page_activate(m);
+                                               }
+                                       }
+                               }
+                               /* we keep the page queues lock, if we need it later */
+                       }
+               }
+       }
+       /* we're done with the page queues lock, if we ever took it */
+       __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
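The DBG_COW_FAULT/DBG_ZERO_FILL_FAULT path above batches brand-new pages on a per-CPU local queue and only takes the global queue lock once a soft limit is crossed. A reduced model of that batching idiom, with hypothetical names and a single queue standing in for the per-CPU array:

#include <stdio.h>

#define SOFT_LIMIT 4

struct local_q {
	int pages[16];
	int count;
};

static int global_active_count;   /* stands in for the global active queue */

/* drain a local queue into the global queue (would take the global lock) */
static void
reactivate_local(struct local_q *lq)
{
	global_active_count += lq->count;
	printf("drained %d pages to global queue\n", lq->count);
	lq->count = 0;
}

static void
enqueue_local(struct local_q *lq, int page)
{
	lq->pages[lq->count++] = page;   /* cheap: no global lock needed */
	if (lq->count > SOFT_LIMIT)      /* beyond the soft limit: drain */
		reactivate_local(lq);
}

int
main(void)
{
	struct local_q lq = { .count = 0 };
	for (int page = 0; page < 12; page++)
		enqueue_local(&lq, page);
	printf("still local: %d, global: %d\n", lq.count, global_active_count);
	return 0;
}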
+
+
+       /*
+        * If we have a KERN_SUCCESS from the previous checks, we either have
+        * a good page, or a tainted page that has been accepted by the process.
+        * In both cases the page will be entered into the pmap.
+        * If the page is writeable, we need to disconnect it from other pmaps
+        * now so those processes can take note.
+        */
+       if (kr == KERN_SUCCESS) {
+               /*
+                * NOTE: we may only hold the vm_object lock SHARED
+                * at this point, so we need the phys_page lock to
+                * properly serialize updating the pmapped and
+                * xpmapped bits
+                */
+               if ((prot & VM_PROT_EXECUTE) && !m->xpmapped) {
+                       ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+
+                       pmap_lock_phys_page(phys_page);
+                       /*
+                        * go ahead and take the opportunity
+                        * to set 'pmapped' here so that we don't
+                        * need to grab this lock a 2nd time
+                        * just below
+                        */
+                       m->pmapped = TRUE;
+
+                       if (!m->xpmapped) {
+
+                               m->xpmapped = TRUE;
+
+                               pmap_unlock_phys_page(phys_page);
+
+                               if (!object->internal)
+                                       OSAddAtomic(1, &vm_page_xpmapped_external_count);
+
+#if defined(__arm__) || defined(__arm64__)
+                               pmap_sync_page_data_phys(phys_page);
+#else
+                               if (object->internal &&
+                                   object->pager != NULL) {
+                                       /*
+                                        * This page could have been
+                                        * uncompressed by the
+                                        * compressor pager and its
+                                        * contents might be only in
+                                        * the data cache.
+                                        * Since it's being mapped for
+                                        * "execute" for the first time,
+                                        * make sure the icache is in
+                                        * sync.
+                                        */
+                                       assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+                                       pmap_sync_page_data_phys(phys_page);
+                               }
+#endif
+                       } else
+                               pmap_unlock_phys_page(phys_page);
+               } else {
+                       if (m->pmapped == FALSE) {
+                               ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+
+                               pmap_lock_phys_page(phys_page);
+                               m->pmapped = TRUE;
+                               pmap_unlock_phys_page(phys_page);
+                       }
+               }
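The pmapped/xpmapped update is a double-checked pattern: xpmapped is tested once without the lock, then re-tested under the per-physical-page lock, so the icache synchronization runs at most once per page while the common case stays lock-free. A user-space sketch of that shape (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	pthread_mutex_t lock;   /* stands in for the phys_page lock */
	bool pmapped;
	bool xpmapped;
};

static void
sync_icache(struct page *p)
{
	(void)p;
	puts("icache sync (runs at most once per page)");
}

static void
enter_execute_mapping(struct page *p)
{
	if (!p->xpmapped) {                  /* unlocked fast-path test */
		pthread_mutex_lock(&p->lock);
		p->pmapped = true;           /* piggyback: avoid relocking below */
		if (!p->xpmapped) {          /* re-test under the lock */
			p->xpmapped = true;
			pthread_mutex_unlock(&p->lock);
			sync_icache(p);      /* expensive work outside the lock */
		} else {
			pthread_mutex_unlock(&p->lock);
		}
	}
}

int
main(void)
{
	struct page p = { PTHREAD_MUTEX_INITIALIZER, false, false };
	enter_execute_mapping(&p);
	enter_execute_mapping(&p);   /* second call: fast path, no sync */
	return 0;
}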
+               if (vm_page_is_slideable(m)) {
+                       boolean_t was_busy = m->busy;
+
+                       vm_object_lock_assert_exclusive(object);
+
+                       m->busy = TRUE;
+                       kr = vm_page_slide(m, 0);
+                       assert(m->busy);
+                       if (!was_busy) {
+                               PAGE_WAKEUP_DONE(m);
+                       }
+                       if (kr != KERN_SUCCESS) {
+                               /*
+                                * This page has not been slid correctly,
+                                * do not do the pmap_enter() !
+                                * Let vm_fault_enter() return the error
+                                * so the caller can fail the fault.
+                                */
+                               goto after_the_pmap_enter;
+                       }
+               }
+
+               if (fault_type & VM_PROT_WRITE) {
+
+                       if (m->wpmapped == FALSE) {
+                               vm_object_lock_assert_exclusive(object);
+                               if (!object->internal && object->pager) {
+                                       task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
+                               }
+                               m->wpmapped = TRUE;
+                       }
+                       if (must_disconnect) {
+                               /*
+                                * We can only get here
+                                * because of the CSE logic
+                                */
+                               assert(cs_enforcement_enabled);
+                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+                               /*
+                                * If we are faulting for a write, we can clear
+                                * the execute bit - that will ensure the page is
+                                * checked again before being executable, which
+                                * protects against a map switch.
+                                * This only happens the first time the page
+                                * gets tainted, so we won't get stuck here
+                                * to make an already writeable page executable.
+                                */
+                               if (!cs_bypass){
+                                       assert(!pmap_has_prot_policy(prot));
+                                       prot &= ~VM_PROT_EXECUTE;
+                               }
+                       }
+               }
+               assert(VM_PAGE_OBJECT(m) == object);
+
+               /*
+                * Prevent a deadlock by not holding the object lock
+                * if we need to wait for a page in pmap_enter():
+                * <rdar://problem/7138958>
+                */
+               PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0,
+                                  wired,
+                                  pmap_options | PMAP_OPTIONS_NOWAIT,
+                                  pe_result);
+#if __x86_64__
+               if (pe_result == KERN_INVALID_ARGUMENT &&
+                   pmap == PMAP_NULL &&
+                   wired) {
+                       /*
+                        * Wiring a page in a pmap-less VM map:
+                        * VMware's "vmmon" kernel extension does this
+                        * to grab pages.
+                        * Let it proceed even though the PMAP_ENTER() failed.
+                        */
+                       pe_result = KERN_SUCCESS;
+               }
+#endif /* __x86_64__ */
+
+               if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+                       if (need_retry) {
+                               /*
+                                * this will be non-null in the case where we hold the lock
+                                * on the top-object in this chain... we can't just drop
+                                * the lock on the object we're inserting the page into
+                                * and recall the PMAP_ENTER since we can still cause
+                                * a deadlock if one of the critical paths tries to
+                                * acquire the lock on the top-object and we're blocked
+                                * in PMAP_ENTER waiting for memory... our only recourse
+                                * is to deal with it at a higher level where we can
+                                * drop both locks.
+                                */
+                               *need_retry = TRUE;
+                               vm_pmap_enter_retried++;
+                               goto after_the_pmap_enter;
+                       }
+                       /*
+                        * The nonblocking version of pmap_enter did not succeed,
+                        * and we don't need to drop the other locks and retry
+                        * at the level above us, so use the blocking version
+                        * instead.  This requires marking the page busy and
+                        * unlocking the object.
+                        */
+                       boolean_t was_busy = m->busy;
+
+                       vm_object_lock_assert_exclusive(object);
+
+                       m->busy = TRUE;
+                       vm_object_unlock(object);
+
+                       PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type,
+                                          0, wired,
+                                          pmap_options, pe_result);
+
+                       assert(VM_PAGE_OBJECT(m) == object);
+
+                       /* Take the object lock again. */
+                       vm_object_lock(object);
+
+                       /* If the page was busy, someone else will wake it up.
+                        * Otherwise, we have to do it now. */
+                       assert(m->busy);
+                       if (!was_busy) {
+                               PAGE_WAKEUP_DONE(m);
+                       }
+                       vm_pmap_enter_blocked++;
+               }
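The KERN_RESOURCE_SHORTAGE handling is a two-phase enter: try the pmap insertion in non-blocking mode under the object lock; if that fails and no caller can help, mark the page busy, drop the lock, and repeat in blocking mode. A condensed model of that drop-and-retry shape, assuming a pthread mutex for the object lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for PMAP_ENTER_OPTIONS: fails when asked not to block */
static int
pmap_enter_model(bool nowait)
{
	return nowait ? -1 /* resource shortage */ : 0;
}

static int
fault_enter_model(void)
{
	pthread_mutex_lock(&object_lock);

	int rc = pmap_enter_model(true);        /* non-blocking attempt */
	if (rc != 0) {
		/*
		 * Blocking attempt: drop the object lock first so the
		 * allocation inside can't deadlock against it; a "busy"
		 * marker would keep the page stable meanwhile.
		 */
		pthread_mutex_unlock(&object_lock);
		rc = pmap_enter_model(false);
		pthread_mutex_lock(&object_lock);
	}

	pthread_mutex_unlock(&object_lock);
	return rc;
}

int
main(void)
{
	printf("fault_enter_model -> %d\n", fault_enter_model());
	return 0;
}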
+
+               kr = pe_result;
+       }
+
+after_the_pmap_enter:
        return kr;
 }
 
+void
+vm_pre_fault(vm_map_offset_t vaddr)
+{
+       if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
+
+               vm_fault(current_map(),      /* map */
+                       vaddr,               /* vaddr */
+                       VM_PROT_READ,        /* fault_type */
+                       FALSE,               /* change_wiring */
+                       VM_KERN_MEMORY_NONE, /* tag - not wiring */
+                       THREAD_UNINT,        /* interruptible */
+                       NULL,                /* caller_pmap */
+                       0                    /* caller_pmap_addr */);
+       }
+}
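vm_pre_fault() drives a read fault only when pmap_find_phys() shows no valid translation, so pre-faulting is idempotent and cheap for already-resident pages. The user-space analogue is touching one byte per page of a range so later accesses don't fault; a portable sketch with an assumed 4 KB page size:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SZ 4096u

/* touch one byte per page so the kernel populates the range up front */
static void
pre_fault_range(void *base, size_t len)
{
	volatile uint8_t *p = base;
	for (size_t off = 0; off < len; off += PAGE_SZ)
		(void)p[off];       /* read fault, like VM_PROT_READ above */
	if (len != 0)
		(void)p[len - 1];   /* don't miss a partial trailing page */
}

int
main(void)
{
	size_t len = 64 * PAGE_SZ;
	void *buf = malloc(len);
	if (buf != NULL) {
		pre_fault_range(buf, len);
		free(buf);
	}
	return 0;
}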
+
 
 /*
  *     Routine:        vm_fault
@@ -2166,19 +3381,54 @@ vm_fault_enter(vm_page_t m,
  */
 
 extern int _map_enter_debug;
+extern uint64_t        get_current_unique_pid(void);
 
 unsigned long vm_fault_collapse_total = 0;
 unsigned long vm_fault_collapse_skipped = 0;
 
+
+kern_return_t
+vm_fault_external(
+       vm_map_t        map,
+       vm_map_offset_t vaddr,
+       vm_prot_t       fault_type,
+       boolean_t       change_wiring,
+       int             interruptible,
+       pmap_t          caller_pmap,
+       vm_map_offset_t caller_pmap_addr)
+{
+       return vm_fault_internal(map, vaddr, fault_type, change_wiring, vm_tag_bt(),
+                                interruptible, caller_pmap, caller_pmap_addr,
+                                NULL);
+}
+
 kern_return_t
 vm_fault(
        vm_map_t        map,
        vm_map_offset_t vaddr,
        vm_prot_t       fault_type,
        boolean_t       change_wiring,
+       vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
        int             interruptible,
        pmap_t          caller_pmap,
        vm_map_offset_t caller_pmap_addr)
+{
+       return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag,
+                                interruptible, caller_pmap, caller_pmap_addr,
+                                NULL);
+}
+
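vm_fault_external() and vm_fault() are thin wrappers over vm_fault_internal(), defaulting the new physpage_p out-parameter to NULL (and, for external callers, deriving the tag via vm_tag_bt()). This keeps existing signatures stable while the internal routine grows. A minimal illustration of the wrapper pattern, with hypothetical names:

#include <stdio.h>

/* the internal routine takes every parameter, including new ones */
static int
fault_internal(unsigned long vaddr, int prot, int *physpage_out)
{
	if (physpage_out != NULL)
		*physpage_out = 42;   /* pretend we resolved a physical page */
	printf("fault at 0x%lx, prot %d\n", vaddr, prot);
	return 0;
}

/* public wrapper: legacy signature, defaults the new parameter */
static int
fault(unsigned long vaddr, int prot)
{
	return fault_internal(vaddr, prot, NULL);
}

int
main(void)
{
	int phys;
	fault(0x1000, 1);                   /* old callers unchanged */
	fault_internal(0x2000, 3, &phys);   /* new callers get the out-param */
	return 0;
}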
+kern_return_t
+vm_fault_internal(
+       vm_map_t        map,
+       vm_map_offset_t vaddr,
+       vm_prot_t       caller_prot,
+       boolean_t       change_wiring,
+       vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
+       int             interruptible,
+       pmap_t          caller_pmap,
+       vm_map_offset_t caller_pmap_addr,
+       ppnum_t         *physpage_p)
 {
        vm_map_version_t        version;        /* Map version for verification */
        boolean_t               wired;          /* Should mapping be wired down? */
@@ -2193,6 +3443,7 @@ vm_fault(
        vm_page_t               m;      /* Fast access to result_page */
        kern_return_t           error_code;
        vm_object_t             cur_object;
+       vm_object_t             m_object = NULL;
        vm_object_offset_t      cur_offset;
        vm_page_t               cur_m;
        vm_object_t             new_object;
@@ -2201,32 +3452,60 @@ vm_fault(
        boolean_t               interruptible_state;
        vm_map_t                real_map = map;
        vm_map_t                original_map = map;
+       boolean_t               object_locks_dropped = FALSE;
+       vm_prot_t               fault_type;
        vm_prot_t               original_fault_type;
        struct vm_object_fault_info fault_info;
        boolean_t               need_collapse = FALSE;
+       boolean_t               need_retry = FALSE;
+       boolean_t               *need_retry_ptr = NULL;
        int                     object_lock_type = 0;
        int                     cur_object_lock_type;
+       vm_object_t             top_object = VM_OBJECT_NULL;
+       int                     throttle_delay;
+       int                     compressed_count_delta;
+       int                     grab_options;
+       vm_map_offset_t         trace_vaddr;
+       vm_map_offset_t         trace_real_vaddr;
+#if DEVELOPMENT || DEBUG
+       vm_map_offset_t         real_vaddr;
+
+       real_vaddr = vaddr;
+#endif /* DEVELOPMENT || DEBUG */
+       trace_real_vaddr = vaddr;
+       vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
+
+       if (map == kernel_map) {
+               trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
+               trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
+       } else {
+               trace_vaddr = vaddr;
+       }
 
-
-       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
-                             (int)((uint64_t)vaddr >> 32),
-                             (int)vaddr,
-                             0,
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                     (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
+                             ((uint64_t)trace_vaddr >> 32),
+                             trace_vaddr,
+                             (map == kernel_map),
                              0,
                              0);
 
        if (get_preemption_level() != 0) {
-               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
-                                     (int)((uint64_t)vaddr >> 32),
-                                     (int)vaddr,
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                                     (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
+                                     ((uint64_t)trace_vaddr >> 32),
+                                     trace_vaddr,
                                      KERN_FAILURE,
                                      0,
                                      0);
 
                return (KERN_FAILURE);
        }
+
        interruptible_state = thread_interrupt_level(interruptible);
 
+       fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);
+
        VM_STAT_INCR(faults);
        current_task()->faults++;
        original_fault_type = fault_type;
@@ -2238,6 +3517,14 @@ vm_fault(
 
        cur_object_lock_type = OBJECT_LOCK_SHARED;
 
+       if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
+               if (compressor_map) {
+                       if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
+                               panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
+
+                       }
+               }
+       }
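The guard above turns a kernel write fault inside the compressor map's address range into an immediate panic rather than silent corruption. The underlying half-open bounds test, in isolation:

#include <stdbool.h>
#include <stdio.h>

/* true if addr falls in [lo, hi) -- the compressor-map style bounds test */
static bool
addr_in_range(unsigned long addr, unsigned long lo, unsigned long hi)
{
	return addr >= lo && addr < hi;
}

int
main(void)
{
	unsigned long lo = 0xffff0000UL, hi = 0xffff8000UL;
	printf("%d %d\n",
	    addr_in_range(0xffff1000UL, lo, hi),   /* 1: would panic */
	    addr_in_range(0xfffe0000UL, lo, hi));  /* 0: allowed */
	return 0;
}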
 RetryFault:
        /*
         * assume we will hit a page in the cache
@@ -2266,6 +3553,10 @@ RetryFault:
        }
        pmap = real_map->pmap;
        fault_info.interruptible = interruptible;
+       fault_info.stealth = FALSE;
+       fault_info.io_sync = FALSE;
+       fault_info.mark_zf_absent = FALSE;
+       fault_info.batch_pmap_op = FALSE;
 
        /*
         * If the page is wired, we must fault for the current protection
@@ -2273,7 +3564,6 @@ RetryFault:
         */
        if (wired) {
                fault_type = prot | VM_PROT_WRITE;
-       
                /*
                 * since we're treating this fault as a 'write'
                 * we must hold the top object lock exclusively
@@ -2330,6 +3620,24 @@ RetryFault:
         *
         */
 
+#if defined(__arm64__)
+       /*
+        * Fail if reading an execute-only page in a
+        * pmap that enforces execute-only protection.
+        */
+       if (fault_type == VM_PROT_READ &&
+               (prot & VM_PROT_EXECUTE) &&
+               !(prot & VM_PROT_READ) &&
+               pmap_enforces_execute_only(pmap)) {
+                       vm_object_unlock(object);
+                       vm_map_unlock_read(map);
+                       if (real_map != map) {
+                               vm_map_unlock(real_map);
+                       }
+                       kr = KERN_PROTECTION_FAILURE;
+                       goto done;
+       }
+#endif
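An execute-only mapping carries VM_PROT_EXECUTE without VM_PROT_READ, and on arm64 a plain read fault against one must fail when the pmap enforces execute-only. The bit test in isolation — the constants below mirror Mach's vm_prot_t values:

#include <stdbool.h>
#include <stdio.h>

#define VM_PROT_READ    0x1   /* values match Mach's vm_prot_t bits */
#define VM_PROT_WRITE   0x2
#define VM_PROT_EXECUTE 0x4

static bool
reject_read_of_xonly(int fault_type, int prot, bool pmap_enforces_xonly)
{
	return fault_type == VM_PROT_READ &&
	    (prot & VM_PROT_EXECUTE) &&
	    !(prot & VM_PROT_READ) &&
	    pmap_enforces_xonly;
}

int
main(void)
{
	/* read fault on an --x page: rejected */
	printf("%d\n", reject_read_of_xonly(VM_PROT_READ, VM_PROT_EXECUTE, true));
	/* read fault on an r-x page: allowed */
	printf("%d\n", reject_read_of_xonly(VM_PROT_READ,
	    VM_PROT_READ | VM_PROT_EXECUTE, true));
	return 0;
}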
 
        /*
         * If this page is to be inserted in a copy delay object
@@ -2343,10 +3651,32 @@ RetryFault:
        cur_object = object;
        cur_offset = offset;
 
+       grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+       if (object->can_grab_secluded) {
+               grab_options |= VM_PAGE_GRAB_SECLUDED;
+       }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
        while (TRUE) {
+               if (!cur_object->pager_created &&
+                   cur_object->phys_contiguous) /* superpage */
+                       break;
+
+               if (cur_object->blocked_access) {
+                       /*
+                        * Access to this VM object has been blocked.
+                        * Let the slow path handle it.
+                        */
+                       break;
+               }
+
                m = vm_page_lookup(cur_object, cur_offset);
+               m_object = NULL;
 
                if (m != VM_PAGE_NULL) {
+                       m_object = cur_object;
+
                        if (m->busy) {
                                wait_result_t   result;
 
@@ -2355,7 +3685,6 @@ RetryFault:
                                 * have object that 'm' belongs to locked exclusively
                                 */
                                if (object != cur_object) {
-                                       vm_object_unlock(object);
 
                                        if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
 
@@ -2364,11 +3693,13 @@ RetryFault:
                                                if (vm_object_lock_upgrade(cur_object) == FALSE) {
                                                        /*
                                                         * couldn't upgrade so go do a full retry
-                                                        * immediately since we've already dropped
-                                                        * the top object lock associated with this page
-                                                        * and the current one got dropped due to the
-                                                        * failed upgrade... the state is no longer valid
+                                                        * immediately since we can no longer be
+                                                        * certain about cur_object (since we
+                                                        * don't hold a reference on it)...
+                                                        * first drop the top object lock
                                                         */
+                                                       vm_object_unlock(object);
+
                                                        vm_map_unlock_read(map);
                                                        if (real_map != map)
                                                                vm_map_unlock(real_map);
@@ -2395,6 +3726,32 @@ RetryFault:
                                                continue;
                                        }
                                }
+                               if ((m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
+                                       /*
+                                        * m->busy == TRUE and the object is locked exclusively.
+                                        * If the page is still on the pageout queue after we
+                                        * acquire the queues lock, we are guaranteed that it is
+                                        * stable on the pageout queue and therefore reclaimable.
+                                        *
+                                        * NOTE: this is only true for the internal pageout queue
+                                        * in the compressor world
+                                        */
+                                       assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+                                       vm_page_lock_queues();
+
+                                       if (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
+                                               vm_pageout_throttle_up(m);
+                                               vm_page_unlock_queues();
+
+                                               PAGE_WAKEUP_DONE(m);
+                                               goto reclaimed_from_pageout;
+                                       }
+                                       vm_page_unlock_queues();
+                               }
+                               if (object != cur_object)
+                                       vm_object_unlock(object);
+
                                vm_map_unlock_read(map);
                                if (real_map != map)
                                        vm_map_unlock(real_map);
@@ -2414,59 +3771,28 @@ RetryFault:
                                kr = KERN_ABORTED;
                                goto done;
                        }
-                       if (m->phys_page == vm_page_guard_addr) {
-                               /*
-                                * Guard page: let the slow path deal with it
-                                */
-                               break;
-                       }
-                       if (m->unusual && (m->error || m->restart || m->private || m->absent)) {
-                               /*
-                                * Unusual case... let the slow path deal with it
-                                */
-                               break;
-                       }
-                       if (m->encrypted) {
-                               /*
-                                * ENCRYPTED SWAP:
-                                * We've soft-faulted (because it's not in the page
-                                * table) on an encrypted page.
-                                * Keep the page "busy" so that no one messes with
-                                * it during the decryption.
-                                * Release the extra locks we're holding, keep only
-                                * the page's VM object lock.
-                                *
-                                * in order to set 'busy' on 'm', we must
-                                * have object that 'm' belongs to locked exclusively
-                                */
-                               if (object != cur_object) {
-                                       vm_object_unlock(object);
-
+reclaimed_from_pageout:
+                       if (m->laundry) {
+                               if (object != cur_object) {
                                        if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+                                               cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
 
-                                               cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+                                               vm_object_unlock(object);
+                                               vm_object_unlock(cur_object);
 
-                                               if (vm_object_lock_upgrade(cur_object) == FALSE) {
-                                                       /*
-                                                        * couldn't upgrade so go do a full retry
-                                                        * immediately since we've already dropped
-                                                        * the top object lock associated with this page
-                                                        * and the current one got dropped due to the
-                                                        * failed upgrade... the state is no longer valid
-                                                        */
-                                                       vm_map_unlock_read(map);
-                                                       if (real_map != map)
-                                                               vm_map_unlock(real_map);
+                                               vm_map_unlock_read(map);
+                                               if (real_map != map)
+                                                       vm_map_unlock(real_map);
 
-                                                       goto RetryFault;
-                                               }
+                                               goto RetryFault;
                                        }
+
                                } else if (object_lock_type == OBJECT_LOCK_SHARED) {
 
-                                       object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+                                       object_lock_type = OBJECT_LOCK_EXCLUSIVE;
 
                                        if (vm_object_lock_upgrade(object) == FALSE) {
-                                               /*
+                                               /*
                                                 * couldn't upgrade, so explicitly take the lock
                                                 * exclusively and go relookup the page since we
                                                 * will have dropped the object lock and
@@ -2475,34 +3801,69 @@ RetryFault:
                                                 * no need for a full retry since we're
                                                 * at the top level of the object chain
                                                 */
-                                               vm_object_lock(object);
+                                               vm_object_lock(object);
 
                                                continue;
                                        }
                                }
-                               m->busy = TRUE;
+                               vm_pageout_steal_laundry(m, FALSE);
+                       }
 
+                       if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
+                               /*
+                                * Guard page: let the slow path deal with it
+                                */
+                               break;
+                       }
+                       if (m->unusual && (m->error || m->restart || m->private || m->absent)) {
+                               /*
+                                * Unusual case... let the slow path deal with it
+                                */
+                               break;
+                       }
+                       if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
+                               if (object != cur_object)
+                                       vm_object_unlock(object);
                                vm_map_unlock_read(map);
-                               if (real_map != map) 
-                                       vm_map_unlock(real_map);
-
-                               vm_page_decrypt(m, 0);
-
-                               assert(m->busy);
-                               PAGE_WAKEUP_DONE(m);
-
+                               if (real_map != map)
+                                       vm_map_unlock(real_map);
                                vm_object_unlock(cur_object);
+                               kr = KERN_MEMORY_ERROR;
+                               goto done;
+                       }
+                       if (vm_page_is_slideable(m)) {
                                /*
-                                * Retry from the top, in case anything
-                                * changed while we were decrypting...
+                                * We might need to slide this page, and so,
+                                * we want to hold the VM object exclusively.
                                 */
-                               goto RetryFault;
+                               if (object != cur_object) {
+                                       if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+                                               vm_object_unlock(object);
+                                               vm_object_unlock(cur_object);
+
+                                               cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                               vm_map_unlock_read(map);
+                                               if (real_map != map)
+                                                       vm_map_unlock(real_map);
+
+                                               goto RetryFault;
+                                       }
+                               } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+                                       vm_object_unlock(object);
+                                       object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+                                       vm_map_unlock_read(map);
+                                       goto RetryFault;
+                               }
                        }
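This is one of several places that try to upgrade a shared object lock to exclusive; when the upgrade fails the locks are gone and the accumulated state is stale, so the only safe move is a full retry from the top. A compact model of that discipline, using a pthread rwlock — which has no upgrade primitive, so the model drops the read lock, tries for the write lock, and retries from the top on failure:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t object_lock = PTHREAD_RWLOCK_INITIALIZER;

static void
fault_with_upgrade(void)
{
retry:
	pthread_rwlock_rdlock(&object_lock);

	/* decide we need exclusive access (e.g. to validate the page) */
	pthread_rwlock_unlock(&object_lock);
	if (pthread_rwlock_trywrlock(&object_lock) != 0)
		goto retry;   /* lost the lock: state is stale, retry fully */

	puts("holding exclusive lock; state revalidated");
	pthread_rwlock_unlock(&object_lock);
}

int
main(void)
{
	fault_with_upgrade();
	return 0;
}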
-                       ASSERT_PAGE_DECRYPTED(m);
+                       assert(m_object == VM_PAGE_OBJECT(m));
 
-                       if (m->object->code_signed && !m->cs_validated) {
+                       if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m, m_object) ||
+                           (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
+upgrade_for_validation:
                                /*
-                                * We will need to validate this page
+                                * We might need to validate this page
                                 * against its code signature, so we
                                 * want to hold the VM object exclusively.
                                 */
@@ -2547,23 +3908,47 @@ RetryFault:
                         *              --> must disallow write.
                         */
 
-                       if (object == cur_object && object->copy == VM_OBJECT_NULL)
+                       if (object == cur_object && object->copy == VM_OBJECT_NULL) {
+
                                goto FastPmapEnter;
+                       }
 
                        if ((fault_type & VM_PROT_WRITE) == 0) {
+                               if (!pmap_has_prot_policy(prot)) {
+                                       prot &= ~VM_PROT_WRITE;
+                               } else {
+                                       /*
+                                        * For a protection that the pmap cares
+                                        * about, we must hand over the full
+                                        * set of protections (so that the pmap
+                                        * layer can apply any desired policy).
+                                        * This means that cs_bypass must be
+                                        * set, as this can force us to pass
+                                        * RWX.
+                                        */
+                                       assert(fault_info.cs_bypass);
+                               }
 
-                               prot &= ~VM_PROT_WRITE;
-
-                               /*
-                                * Set up to map the page...
-                                * mark the page busy, drop
-                                * unneeded object lock
-                                */     
                                if (object != cur_object) {
-                                       /*      
-                                        * don't need the original object anymore
+                                       /*
+                                        * We still need to hold the top object
+                                        * lock here to prevent a race between
+                                        * a read fault (taking only "shared"
+                                        * locks) and a write fault (taking
+                                        * an "exclusive" lock on the top
+                                        * object.
+                                        * object).
+                                        * top lock, the write fault could
+                                        * proceed and actually complete before
+                                        * the read fault, and the copied page's
+                                        * translation could then be overwritten
+                                        * by the read fault's translation for
+                                        * the original page.
+                                        *
+                                        * Let's just record what the top object
+                                        * is and we'll release it later.
                                         */
-                                       vm_object_unlock(object);
+                                       top_object = object;
 
                                        /*
                                         * switch to the object that has the new page
@@ -2572,6 +3957,8 @@ RetryFault:
                                        object_lock_type = cur_object_lock_type;
                                }
 FastPmapEnter:
+                               assert(m_object == VM_PAGE_OBJECT(m));
+
                                /*
                                 * prepare for the pmap_enter...
                                 * object and map are both locked
@@ -2580,34 +3967,87 @@ FastPmapEnter:
                                 * cur_object == NULL or it's been unlocked
                                 * no paging references on either object or cur_object
                                 */
-#if    MACH_KDB
-                               if (db_watchpoint_list && (fault_type & VM_PROT_WRITE) == 0)
-                                       prot &= ~VM_PROT_WRITE;
-#endif
+                               if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE)
+                                       need_retry_ptr = &need_retry;
+                               else
+                                       need_retry_ptr = NULL;
+
                                if (caller_pmap) {
                                        kr = vm_fault_enter(m,
                                                            caller_pmap,
                                                            caller_pmap_addr,
                                                            prot,
+                                                           caller_prot,
                                                            wired,
                                                            change_wiring,
+                                                           wire_tag,
                                                            fault_info.no_cache,
+                                                           fault_info.cs_bypass,
+                                                           fault_info.user_tag,
+                                                           fault_info.pmap_options,
+                                                           need_retry_ptr,
                                                            &type_of_fault);
                                } else {
                                        kr = vm_fault_enter(m,
                                                            pmap,
                                                            vaddr,
                                                            prot,
+                                                           caller_prot,
                                                            wired,
                                                            change_wiring,
+                                                           wire_tag,
                                                            fault_info.no_cache,
+                                                           fault_info.cs_bypass,
+                                                           fault_info.user_tag,
+                                                           fault_info.pmap_options,
+                                                           need_retry_ptr,
                                                            &type_of_fault);
                                }
+#if DEVELOPMENT || DEBUG
+                               {
+                               int     event_code = 0;
+
+                               if (m_object->internal)
+                                       event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
+                               else if (m_object->object_slid)
+                                       event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
+                               else
+                                       event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
+
+                               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
+
+                               DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
+                               }
+#endif
+                               if (kr == KERN_SUCCESS &&
+                                   physpage_p != NULL) {
+                                       /* for vm_map_wire_and_extract() */
+                                       *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
+                                       if (prot & VM_PROT_WRITE) {
+                                               vm_object_lock_assert_exclusive(m_object);
+                                               m->dirty = TRUE;
+                                       }
+                               }
+
+                               if (top_object != VM_OBJECT_NULL) {
+                                       /*
+                                        * It's safe to drop the top object
+                                        * now that we've done our
+                                        * vm_fault_enter().  Any other fault
+                                        * in progress for that virtual
+                                        * address will either find our page
+                                        * and translation or put in a new page
+                                        * and translation.
+                                        */
+                                       vm_object_unlock(top_object);
+                                       top_object = VM_OBJECT_NULL;
+                               }
 
                                if (need_collapse == TRUE)
                                        vm_object_collapse(object, offset, TRUE);
 
-                               if (type_of_fault == DBG_PAGEIN_FAULT) {
+                               if (need_retry == FALSE &&
+                                   (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
                                        /*
                                         * evaluate access pattern and update state
                                         * vm_fault_deactivate_behind depends on the
@@ -2629,11 +4069,30 @@ FastPmapEnter:
                                if (real_map != map)
                                        vm_map_unlock(real_map);
 
+                               if (need_retry == TRUE) {
+                                       /*
+                                        * vm_fault_enter couldn't complete the PMAP_ENTER...
+                                        * at this point we don't hold any locks so it's safe
+                                        * to ask the pmap layer to expand the page table to
+                                        * accommodate this mapping... once expanded, we'll
+                                        * re-drive the fault which should result in vm_fault_enter
+                                        * being able to successfully enter the mapping this time around
+                                        */
+                                       (void)pmap_enter_options(
+                                               pmap, vaddr, 0, 0, 0, 0, 0,
+                                               PMAP_OPTIONS_NOENTER, NULL);
+
+                                       need_retry = FALSE;
+                                       goto RetryFault;
+                               }
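/*
 * [Editor's sketch -- not part of this patch.  The need_retry path above is
 * a drop-locks-preallocate-retry pattern: vm_fault_enter() refuses to block
 * for a missing page table while locks are held, so the fault handler drops
 * everything, asks the pmap layer to expand the table (PMAP_OPTIONS_NOENTER
 * builds the table without entering a mapping), and re-drives the fault.
 * A generic analogue; every name below is hypothetical:]
 */
#include <stdbool.h>

extern void lock_everything(void);
extern void unlock_everything(void);
extern bool try_operation(void);        /* false => a resource is missing  */
extern void preallocate_resource(void); /* may block; call with no locks   */

static void
drive_with_preallocation(void)
{
	for (;;) {
		lock_everything();
		if (try_operation()) {          /* fast path succeeded        */
			unlock_everything();
			return;
		}
		unlock_everything();            /* drop ALL locks first       */
		preallocate_resource();         /* cf. pmap_enter_options(...,
		                                 * PMAP_OPTIONS_NOENTER, ...) */
		/* loop around, like the goto RetryFault above */
	}
}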
                                goto done;
                        }
                        /*
                         * COPY ON WRITE FAULT
-                        *
+                        */
+                       assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
+
+                        /*
                         * If objects match, then
                         * object->copy must not be NULL (else control
                         * would be in previous code block), and we
@@ -2647,26 +4106,34 @@ FastPmapEnter:
                                 */
                                break;
                        }
-                       assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
 
                        /*
                         * This is now a shadow based copy on write
                         * fault -- it requires a copy up the shadow
                         * chain.
-                        *
+                        */
+                       assert(m_object == VM_PAGE_OBJECT(m));
+
+                       if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
+                           VM_FAULT_NEED_CS_VALIDATION(NULL, m, m_object)) {
+                               goto upgrade_for_validation;
+                       }
+
+                       /*
                         * Allocate a page in the original top level
                         * object. Give up if allocate fails.  Also
                         * need to remember current page, as it's the
                         * source of the copy.
                         *
-                        * at this point we hold locks on both 
+                        * at this point we hold locks on both
                         * object and cur_object... no need to take
                         * paging refs or mark pages BUSY since
                         * we don't drop either object lock until
                         * the page has been copied and inserted
                         */
                        cur_m = m;
-                       m = vm_page_grab();
+                       m = vm_page_grab_options(grab_options);
+                       m_object = NULL;
 
                        if (m == VM_PAGE_NULL) {
                                /*
@@ -2683,14 +4150,20 @@ FastPmapEnter:
                         */
                        vm_page_copy(cur_m, m);
                        vm_page_insert(m, object, offset);
-                       m->dirty = TRUE;
+                       m_object = object;
+                       SET_PAGE_DIRTY(m, FALSE);
 
                        /*
                         * Now cope with the source page and object
                         */
                        if (object->ref_count > 1 && cur_m->pmapped)
-                               pmap_disconnect(cur_m->phys_page);
+                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
 
+                       if (cur_m->clustered) {
+                               VM_PAGE_COUNT_AS_PAGEIN(cur_m);
+                               VM_PAGE_CONSUME_CLUSTERED(cur_m);
+                               vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
+                       }
                        need_collapse = TRUE;
 
                        if (!cur_object->internal &&
@@ -2736,12 +4209,230 @@ FastPmapEnter:
                         * No page at cur_object, cur_offset... m == NULL
                         */
                        if (cur_object->pager_created) {
-                               if (MUST_ASK_PAGER(cur_object, cur_offset) == TRUE) {
+                               int     compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
+
+                               if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
+                                       int             my_fault_type;
+                                       int             c_flags = C_DONT_BLOCK;
+                                       boolean_t       insert_cur_object = FALSE;
+
                                        /*
                                         * May have to talk to a pager...
-                                        * take the slow path.
+                                        * if so, take the slow path by
+                                        * doing a 'break' from the while (TRUE) loop
+                                        *
+                                        * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
+                                        * if the compressor is active and the page exists there
                                         */
-                                       break;
+                                       if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS)
+                                               break;
+
+                                       if (map == kernel_map || real_map == kernel_map) {
+                                               /*
+                                                * can't call into the compressor with the kernel_map
+                                                * lock held, since the compressor may try to operate
+                                                * on the kernel map in order to return an empty c_segment
+                                                */
+                                               break;
+                                       }
+                                       if (object != cur_object) {
+                                               if (fault_type & VM_PROT_WRITE)
+                                                       c_flags |= C_KEEP;
+                                               else
+                                                       insert_cur_object = TRUE;
+                                       }
+                                       if (insert_cur_object == TRUE) {
+
+                                               if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+
+                                                       cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                                       if (vm_object_lock_upgrade(cur_object) == FALSE) {
+                                                               /*
+                                                                * couldn't upgrade, so go do a full retry
+                                                                * immediately since we can no longer be
+                                                                * certain about cur_object (since we
+                                                                * don't hold a reference on it)...
+                                                                * first drop the top object lock
+                                                                */
+                                                               vm_object_unlock(object);
+
+                                                               vm_map_unlock_read(map);
+                                                               if (real_map != map)
+                                                                       vm_map_unlock(real_map);
+
+                                                               goto RetryFault;
+                                                       }
+                                               }
+                                       } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+                                               object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+                                               if (object != cur_object) {
+                                                       /*
+                                                        * we can't go for the upgrade on the top
+                                                        * lock since the upgrade may block waiting
+                                                        * for readers to drain... since we hold
+                                                        * cur_object locked at this point, waiting
+                                                        * for the readers to drain would represent
+                                                        * a lock order inversion since the lock order
+                                                        * for objects is the reference order in the
+                                                        * shadow chain
+                                                        */
+                                                       vm_object_unlock(object);
+                                                       vm_object_unlock(cur_object);
+
+                                                       vm_map_unlock_read(map);
+                                                       if (real_map != map)
+                                                               vm_map_unlock(real_map);
+
+                                                       goto RetryFault;
+                                               }
+                                               if (vm_object_lock_upgrade(object) == FALSE) {
+                                                       /*
+                                                        * couldn't upgrade, so explicitly take the lock
+                                                        * exclusively and go relookup the page since we
+                                                        * will have dropped the object lock and
+                                                        * a different thread could have inserted
+                                                        * a page at this offset...
+                                                        * no need for a full retry since we're
+                                                        * at the top level of the object chain
+                                                        */
+                                                       vm_object_lock(object);
+
+                                                       continue;
+                                               }
+                                       }
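/*
 * [Editor's sketch -- not part of this patch.  POSIX rwlocks have no atomic
 * shared-to-exclusive upgrade, so a user-space analogue of the fallback above
 * is: drop the shared hold, take the lock exclusive, then REVALIDATE, since
 * another thread may have changed things in the window -- exactly why the
 * code above does a 'continue' to relookup the page.  Hypothetical names:]
 */
#include <pthread.h>
#include <stdbool.h>

extern bool state_still_valid(void);    /* e.g. "still no page at offset?" */

static bool
upgrade_and_revalidate(pthread_rwlock_t *lk)
{
	pthread_rwlock_unlock(lk);          /* drop the shared hold           */
	pthread_rwlock_wrlock(lk);          /* reacquire exclusive            */
	return state_still_valid();         /* false => caller must relookup  */
}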
+                                       m = vm_page_grab_options(grab_options);
+                                       m_object = NULL;
+
+                                       if (m == VM_PAGE_NULL) {
+                                               /*
+                                                * no free page currently available...
+                                                * must take the slow path
+                                                */
+                                               break;
+                                       }
+
+                                       /*
+                                        * The object is and remains locked
+                                        * so no need to take a
+                                        * "paging_in_progress" reference.
+                                        */
+                                       boolean_t shared_lock;
+                                       if ((object == cur_object &&
+                                            object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
+                                           (object != cur_object &&
+                                            cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
+                                               shared_lock = FALSE;
+                                       } else {
+                                               shared_lock = TRUE;
+                                       }
+
+                                       kr = vm_compressor_pager_get(
+                                               cur_object->pager,
+                                               (cur_offset +
+                                                cur_object->paging_offset),
+                                               VM_PAGE_GET_PHYS_PAGE(m),
+                                               &my_fault_type,
+                                               c_flags,
+                                               &compressed_count_delta);
+
+                                       vm_compressor_pager_count(
+                                               cur_object->pager,
+                                               compressed_count_delta,
+                                               shared_lock,
+                                               cur_object);
+
+                                       if (kr != KERN_SUCCESS) {
+                                               vm_page_release(m, FALSE);
+                                               m = VM_PAGE_NULL;
+                                               break;
+                                       }
+                                       m->dirty = TRUE;
+
+                                       /*
+                                        * If the object is purgeable, its
+                                        * owner's purgeable ledgers will be
+                                        * updated in vm_page_insert() but the
+                                        * page was also accounted for in a
+                                        * "compressed purgeable" ledger, so
+                                        * update that now.
+                                        */
+                                       if (object != cur_object &&
+                                           !insert_cur_object) {
+                                               /*
+                                                * We're not going to insert
+                                                * the decompressed page into
+                                                * the object it came from.
+                                                *
+                                                * We're dealing with a
+                                                * copy-on-write fault on
+                                                * "object".
+                                                * We're going to decompress
+                                                * the page directly into the
+                                                * target "object" while
+                                                * keeping the compressed
+                                                * page for "cur_object", so
+                                                * no ledger update in that
+                                                * case.
+                                                */
+                                       } else if ((cur_object->purgable ==
+                                                   VM_PURGABLE_DENY) ||
+                                                  (cur_object->vo_purgeable_owner ==
+                                                   NULL)) {
+                                               /*
+                                                * "cur_object" is not purgeable
+                                                * or is not owned, so no
+                                                * purgeable ledgers to update.
+                                                */
+                                       } else {
+                                               /*
+                                                * One less compressed
+                                                * purgeable page for
+                                                * cur_object's owner.
+                                                */
+                                               vm_purgeable_compressed_update(
+                                                       cur_object,
+                                                       -1);
+                                       }
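/*
 * [Editor's sketch -- not part of this patch.  The three-way ledger decision
 * above reduces to a single predicate: debit one compressed purgeable page
 * only when the compressed copy is being consumed (not kept for a COW fault)
 * and the source object is purgeable and owned.  Hypothetical names:]
 */
#include <stdbool.h>

static bool
should_debit_compressed_ledger(bool decompressing_into_other_object,
    bool keeping_compressed_copy, bool purgeable, bool owned)
{
	if (decompressing_into_other_object && keeping_compressed_copy)
		return false;   /* COW fault: the compressed page stays charged */
	return purgeable && owned;
}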
+
+                                       if (insert_cur_object) {
+                                               vm_page_insert(m, cur_object, cur_offset);
+                                               m_object = cur_object;
+                                       } else {
+                                               vm_page_insert(m, object, offset);
+                                               m_object = object;
+                                       }
+
+                                       if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
+                                                /*
+                                                * If the page is not cacheable,
+                                                * we can't let its contents
+                                                * linger in the data cache
+                                                * after the decompression.
+                                                */
+                                               pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
+                                       }
+
+                                       type_of_fault = my_fault_type;
+
+                                       VM_STAT_INCR(decompressions);
+
+                                       if (cur_object != object) {
+                                               if (insert_cur_object) {
+                                                       top_object = object;
+                                                       /*
+                                                        * switch to the object that has the new page
+                                                        */
+                                                       object = cur_object;
+                                                       object_lock_type = cur_object_lock_type;
+                                               } else {
+                                                       vm_object_unlock(cur_object);
+                                                       cur_object = object;
+                                               }
+                                       }
+                                       goto FastPmapEnter;
                                }
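/*
 * [Editor's sketch -- not part of this patch.  Stripped to its skeleton, the
 * compressor fast path above is: grab a free page, decompress straight into
 * it, mark it dirty (the only copy now lives in RAM), insert it into the
 * right object, and enter the mapping.  Hypothetical names throughout:]
 */
#include <stdbool.h>
#include <stddef.h>

struct page { bool dirty; };

extern struct page *grab_free_page(void);
extern bool decompress_into(struct page *p);   /* false => pager error   */
extern void release_page(struct page *p);
extern void insert_page(struct page *p);       /* object or cur_object   */
extern void enter_mapping(struct page *p);
extern void take_slow_path(void);

static void
compressor_fast_path(void)
{
	struct page *page = grab_free_page();

	if (page == NULL) {                 /* no free page available      */
		take_slow_path();
		return;
	}
	if (!decompress_into(page)) {
		release_page(page);
		take_slow_path();
		return;
	}
	page->dirty = true;                 /* no backing copy on disk     */
	insert_page(page);
	enter_mapping(page);                /* cf. goto FastPmapEnter      */
}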
                                /*
                                 * existence map present and indicates
@@ -2753,8 +4444,11 @@ FastPmapEnter:
                                 * Zero fill fault.  Page gets
                                 * inserted into the original object.
                                 */
-                               if (cur_object->shadow_severed) {
-
+                               if (cur_object->shadow_severed ||
+                                   VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
+                                   cur_object == compressor_object ||
+                                   cur_object == kernel_object ||
+                                   cur_object == vm_submap_object) {
                                        if (object != cur_object)
                                                vm_object_unlock(cur_object);
                                        vm_object_unlock(object);
@@ -2766,32 +4460,10 @@ FastPmapEnter:
                                        kr = KERN_MEMORY_ERROR;
                                        goto done;
                                }
-                               if (VM_PAGE_ZFILL_THROTTLED()) {
-                                       /*
-                                        * drop all of our locks...
-                                        * wait until the free queue is
-                                        * pumped back up and then
-                                        * redrive the fault
-                                        */
-                                       if (object != cur_object)
-                                               vm_object_unlock(cur_object);
-                                       vm_object_unlock(object);
-                                       vm_map_unlock_read(map);
-                                       if (real_map != map)
-                                               vm_map_unlock(real_map);
-
-                                       if (vm_page_wait((change_wiring) ? 
-                                                        THREAD_UNINT :
-                                                        THREAD_ABORTSAFE))
-                                               goto RetryFault;
-
-                                       kr = KERN_ABORTED;
-                                       goto done;
-                               }
                                if (vm_backing_store_low) {
                                        /*
                                         * we are protecting the system from
-                                        * backing store exhaustion... 
+                                        * backing store exhaustion...
                                         * must take the slow path if we're
                                         * not privileged
                                         */
@@ -2822,6 +4494,7 @@ FastPmapEnter:
                                        }
                                }
                                m = vm_page_alloc(object, offset);
+                               m_object = NULL;
 
                                if (m == VM_PAGE_NULL) {
                                        /*
@@ -2830,10 +4503,11 @@ FastPmapEnter:
                                         */
                                        break;
                                }
+                               m_object = object;
 
                                /*
                                 * Now zero fill page...
-                                * the page is probably going to 
+                                * the page is probably going to
                                 * be written soon, so don't bother
                                 * to clear the modified bit
                                 *
@@ -2847,7 +4521,7 @@ FastPmapEnter:
                        /*
                         * On to the next level in the shadow chain
                         */
-                       cur_offset += cur_object->shadow_offset;
+                       cur_offset += cur_object->vo_shadow_offset;
                        new_object = cur_object->shadow;
 
                        /*
@@ -2896,6 +4570,28 @@ handle_copy_delay:
        if (real_map != map)
                vm_map_unlock(real_map);
 
+       if (__improbable(object == compressor_object ||
+               object == kernel_object ||
+               object == vm_submap_object)) {
+               /*
+                * These objects are explicitly managed and populated by the
+                * kernel.  The virtual ranges backed by these objects should
+                * either have wired pages or "holes" that are not supposed to
+                * be accessed at all until they get explicitly populated.
+                * We should never have to resolve a fault on a mapping backed
+                * by one of these VM objects and providing a zero-filled page
+                * would be wrong here, so let's fail the fault and let the
+                * caller crash or recover.
+                */
+               vm_object_unlock(object);
+               kr = KERN_MEMORY_ERROR;
+               goto done;
+       }
+
+       assert(object != compressor_object);
+       assert(object != kernel_object);
+       assert(object != vm_submap_object);
+
        /*
         * Make a reference to this object to
         * prevent its disposal while we are messing with
@@ -2910,8 +4606,10 @@ handle_copy_delay:
 
        error_code = 0;
 
+       result_page = VM_PAGE_NULL;
        kr = vm_fault_page(object, offset, fault_type,
                           (change_wiring && !wired),
+                          FALSE, /* page not looked up */
                           &prot, &result_page, &top_page,
                           &type_of_fault,
                           &error_code, map->no_zero_fill,
@@ -2925,14 +4623,14 @@ handle_copy_delay:
         * if kr == VM_FAULT_SUCCESS, then the paging reference
         * is still held along with the ref_count on the original object
         *
-        *      if m != NULL, then the object it belongs to 
-        *      is returned locked with a paging reference
+        *      the object is returned locked with a paging reference
         *
-        *      if top_page != NULL, then it's BUSY and the 
+        *      if top_page != NULL, then it's BUSY and the
         *      object it belongs to has a paging reference
         *      but is returned unlocked
         */
-       if (kr != VM_FAULT_SUCCESS) {
+       if (kr != VM_FAULT_SUCCESS &&
+           kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
                /*
                 * we didn't succeed, lose the object reference immediately.
                 */
@@ -2943,7 +4641,7 @@ handle_copy_delay:
                 */
                switch (kr) {
                case VM_FAULT_MEMORY_SHORTAGE:
-                       if (vm_page_wait((change_wiring) ? 
+                       if (vm_page_wait((change_wiring) ?
                                         THREAD_UNINT :
                                         THREAD_ABORTSAFE))
                                goto RetryFault;
@@ -2961,14 +4659,19 @@ handle_copy_delay:
                        else
                                kr = KERN_MEMORY_ERROR;
                        goto done;
+               default:
+                       panic("vm_fault: unexpected error 0x%x from "
+                             "vm_fault_page()\n", kr);
                }
        }
        m = result_page;
+       m_object = NULL;
 
        if (m != VM_PAGE_NULL) {
+               m_object = VM_PAGE_OBJECT(m);
                assert((change_wiring && !wired) ?
-                   (top_page == VM_PAGE_NULL) :
-                   ((top_page == VM_PAGE_NULL) == (m->object == object)));
+                      (top_page == VM_PAGE_NULL) :
+                      ((top_page == VM_PAGE_NULL) == (m_object == object)));
        }
 
        /*
@@ -2978,26 +4681,55 @@ handle_copy_delay:
 #define RELEASE_PAGE(m)                                        \
        MACRO_BEGIN                                     \
        PAGE_WAKEUP_DONE(m);                            \
-       vm_page_lockspin_queues();                      \
-       if (!m->active && !m->inactive && !m->throttled)\
-               vm_page_activate(m);                    \
-       vm_page_unlock_queues();                        \
+       if ( !VM_PAGE_PAGEABLE(m)) {                    \
+               vm_page_lockspin_queues();              \
+               if ( !VM_PAGE_PAGEABLE(m))              \
+                       vm_page_activate(m);            \
+               vm_page_unlock_queues();                \
+       }                                               \
        MACRO_END
 
+
+       object_locks_dropped = FALSE;
        /*
         * We must verify that the maps have not changed
-        * since our last lookup.
+        * since our last lookup. vm_map_verify() needs the
+        * map lock (shared) but we are holding object locks.
+        * So we do a try_lock() first and, if that fails, we
+        * drop the object locks and take the map lock for real.
         */
-       if (m != VM_PAGE_NULL) {
-               old_copy_object = m->object->copy;
-               vm_object_unlock(m->object);
-       } else
-               old_copy_object = VM_OBJECT_NULL;
+       if (!vm_map_try_lock_read(original_map)) {
+
+               if (m != VM_PAGE_NULL) {
+                       old_copy_object = m_object->copy;
+                       vm_object_unlock(m_object);
+               } else {
+                       old_copy_object = VM_OBJECT_NULL;
+                       vm_object_unlock(object);
+               }
+
+               object_locks_dropped = TRUE;
+
+               vm_map_lock_read(original_map);
+       }
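/*
 * [Editor's sketch -- not part of this patch.  This is the classic try-lock
 * fallback for avoiding a lock-order inversion: never block on lock A (the
 * map lock) while holding lock B (an object lock) out of order.  Analogue
 * with pthread mutexes; the caller must revalidate if B was dropped:]
 */
#include <pthread.h>
#include <stdbool.h>

static bool                 /* true => B was dropped along the way */
lock_a_politely(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (pthread_mutex_trylock(a) == 0)
		return false;           /* got A without blocking; B still held */
	pthread_mutex_unlock(b);    /* like dropping the object locks above */
	pthread_mutex_lock(a);      /* now safe to block on A               */
	return true;                /* mirrors object_locks_dropped = TRUE  */
}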
 
-       /*
-        * no object locks are held at this point
-        */
        if ((map != original_map) || !vm_map_verify(map, &version)) {
+
+               if (object_locks_dropped == FALSE) {
+                       if (m != VM_PAGE_NULL) {
+                               old_copy_object = m_object->copy;
+                               vm_object_unlock(m_object);
+                       } else {
+                               old_copy_object = VM_OBJECT_NULL;
+                               vm_object_unlock(object);
+                       }
+
+                       object_locks_dropped = TRUE;
+               }
+
+               /*
+                * no object locks are held at this point
+                */
                vm_object_t             retry_object;
                vm_object_offset_t      retry_offset;
                vm_prot_t               retry_prot;
@@ -3012,7 +4744,6 @@ handle_copy_delay:
                 * take another fault.
                 */
                map = original_map;
-               vm_map_lock_read(map);
 
                kr = vm_map_lookup_locked(&map, vaddr,
                                          fault_type & ~VM_PROT_WRITE,
@@ -3027,17 +4758,19 @@ handle_copy_delay:
                        vm_map_unlock_read(map);
 
                        if (m != VM_PAGE_NULL) {
+                               assert(VM_PAGE_OBJECT(m) == m_object);
+
                                /*
                                 * retake the lock so that
                                 * we can drop the paging reference
                                 * in vm_fault_cleanup and do the
                                 * PAGE_WAKEUP_DONE in RELEASE_PAGE
                                 */
-                               vm_object_lock(m->object);
+                               vm_object_lock(m_object);
 
                                RELEASE_PAGE(m);
 
-                               vm_fault_cleanup(m->object, top_page);
+                               vm_fault_cleanup(m_object, top_page);
                        } else {
                                /*
                                 * retake the lock so that
@@ -3061,17 +4794,19 @@ handle_copy_delay:
                                vm_map_unlock(real_map);
 
                        if (m != VM_PAGE_NULL) {
+                               assert(VM_PAGE_OBJECT(m) == m_object);
+
                                /*
                                 * retake the lock so that
                                 * we can drop the paging reference
                                 * in vm_fault_cleanup and do the
                                 * PAGE_WAKEUP_DONE in RELEASE_PAGE
                                 */
-                               vm_object_lock(m->object);
+                               vm_object_lock(m_object);
 
                                RELEASE_PAGE(m);
 
-                               vm_fault_cleanup(m->object, top_page);
+                               vm_fault_cleanup(m_object, top_page);
                        } else {
                                /*
                                 * retake the lock so that
@@ -3090,20 +4825,31 @@ handle_copy_delay:
                 * Check whether the protection has changed or the object
                 * has been copied while we left the map unlocked.
                 */
-               prot &= retry_prot;
+               if (pmap_has_prot_policy(retry_prot)) {
+                       /* If the pmap layer cares, pass the full set. */
+                       prot = retry_prot;
+               } else {
+                       prot &= retry_prot;
+               }
        }
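/*
 * [Editor's sketch -- not part of this patch.  The protection update above
 * in isolation: normally the effective protection is the intersection of
 * what we had and what the re-lookup returned, but a pmap-enforced
 * protection policy must reach the pmap layer unmodified:]
 */
#include <stdbool.h>

static unsigned int
effective_prot(unsigned int prot, unsigned int retry_prot,
    bool pmap_has_policy)
{
	return pmap_has_policy ? retry_prot : (prot & retry_prot);
}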
-       if (m != VM_PAGE_NULL) {
-               vm_object_lock(m->object);
 
-               if (m->object->copy != old_copy_object) {
-                       /*
-                        * The copy object changed while the top-level object
-                        * was unlocked, so take away write permission.
-                        */
-                       prot &= ~VM_PROT_WRITE;
-               }
-       } else
-               vm_object_lock(object);
+       if (object_locks_dropped == TRUE) {
+               if (m != VM_PAGE_NULL) {
+                       vm_object_lock(m_object);
+
+                       if (m_object->copy != old_copy_object) {
+                               /*
+                                * The copy object changed while the top-level object
+                                * was unlocked, so take away write permission.
+                                */
+                               assert(!pmap_has_prot_policy(prot));
+                               prot &= ~VM_PROT_WRITE;
+                       }
+               } else
+                       vm_object_lock(object);
+
+               object_locks_dropped = FALSE;
+       }
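/*
 * [Editor's sketch -- not part of this patch.  The old_copy_object check
 * above is optimistic revalidation: sample a value before unlocking, compare
 * after re-locking, and on any change fall back conservatively -- here, by
 * stripping write permission so the next write takes a fresh copy-on-write
 * fault instead of retrying outright.  Hypothetical types:]
 */
#include <stdbool.h>

struct object { struct object *copy; };

static bool
still_writable(const struct object *obj, const struct object *sampled_copy,
    bool had_write)
{
	/* any change to obj->copy while unlocked invalidates write access */
	return had_write && obj->copy == sampled_copy;
}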
 
        /*
         * If we want to wire down this page, but no longer have
@@ -3111,14 +4857,16 @@ handle_copy_delay:
         */
        if (wired && (fault_type != (prot | VM_PROT_WRITE))) {
 
-               vm_map_verify_done(map, &version);
+               vm_map_unlock_read(map);
                if (real_map != map)
                        vm_map_unlock(real_map);
 
                if (m != VM_PAGE_NULL) {
+                       assert(VM_PAGE_OBJECT(m) == m_object);
+
                        RELEASE_PAGE(m);
 
-                       vm_fault_cleanup(m->object, top_page);
+                       vm_fault_cleanup(m_object, top_page);
                } else
                        vm_fault_cleanup(object, top_page);
 
@@ -3139,39 +4887,77 @@ handle_copy_delay:
                                            caller_pmap,
                                            caller_pmap_addr,
                                            prot,
+                                           caller_prot,
                                            wired,
                                            change_wiring,
+                                           wire_tag,
                                            fault_info.no_cache,
+                                           fault_info.cs_bypass,
+                                           fault_info.user_tag,
+                                           fault_info.pmap_options,
+                                           NULL,
                                            &type_of_fault);
                } else {
                        kr = vm_fault_enter(m,
                                            pmap,
                                            vaddr,
                                            prot,
+                                           caller_prot,
                                            wired,
                                            change_wiring,
+                                           wire_tag,
                                            fault_info.no_cache,
+                                           fault_info.cs_bypass,
+                                           fault_info.user_tag,
+                                           fault_info.pmap_options,
+                                           NULL,
                                            &type_of_fault);
                }
+               assert(VM_PAGE_OBJECT(m) == m_object);
+
+#if DEVELOPMENT || DEBUG
+       {
+               int     event_code = 0;
+
+               if (m_object->internal)
+                       event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
+               else if (m_object->object_slid)
+                       event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
+               else
+                       event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
+
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
+
+               DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
+               }
+#endif
                if (kr != KERN_SUCCESS) {
                        /* abort this page fault */
-                       vm_map_verify_done(map, &version);
+                       vm_map_unlock_read(map);
                        if (real_map != map)
                                vm_map_unlock(real_map);
                        PAGE_WAKEUP_DONE(m);
-                       vm_fault_cleanup(m->object, top_page);
+                       vm_fault_cleanup(m_object, top_page);
                        vm_object_deallocate(object);
                        goto done;
                }
+               if (physpage_p != NULL) {
+                       /* for vm_map_wire_and_extract() */
+                       *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
+                       if (prot & VM_PROT_WRITE) {
+                               vm_object_lock_assert_exclusive(m_object);
+                               m->dirty = TRUE;
+                       }
+               }
        } else {
 
                vm_map_entry_t          entry;
                vm_map_offset_t         laddr;
                vm_map_offset_t         ldelta, hdelta;
 
-               /* 
+               /*
                 * do a pmap block mapping from the physical address
-                * in the object 
+                * in the object
                 */
 
 #ifdef ppc
@@ -3181,9 +4967,9 @@ handle_copy_delay:
                /* to execute, we return with a protection failure.      */
 
                if ((fault_type & VM_PROT_EXECUTE) &&
-                       (!pmap_eligible_for_execute((ppnum_t)(object->shadow_offset >> 12)))) {
+                       (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) {
 
-                       vm_map_verify_done(map, &version);
+                       vm_map_unlock_read(map);
 
                        if (real_map != map)
                                vm_map_unlock(real_map);
@@ -3216,63 +5002,105 @@ handle_copy_delay:
                        if (hdelta > (entry->vme_end - laddr))
                                hdelta = entry->vme_end - laddr;
                        if (entry->is_sub_map) {
-                               
-                               laddr = (laddr - entry->vme_start) 
-                                                       + entry->offset;
-                               vm_map_lock_read(entry->object.sub_map);
+
+                               laddr = ((laddr - entry->vme_start)
+                                        + VME_OFFSET(entry));
+                               vm_map_lock_read(VME_SUBMAP(entry));
 
                                if (map != real_map)
                                        vm_map_unlock_read(map);
                                if (entry->use_pmap) {
                                        vm_map_unlock_read(real_map);
-                                       real_map = entry->object.sub_map;
+                                       real_map = VME_SUBMAP(entry);
                                }
-                               map = entry->object.sub_map;
-                               
+                               map = VME_SUBMAP(entry);
+
                        } else {
                                break;
                        }
                }
 
-               if (vm_map_lookup_entry(map, laddr, &entry) && 
-                                       (entry->object.vm_object != NULL) &&
-                                       (entry->object.vm_object == object)) {
+               if (vm_map_lookup_entry(map, laddr, &entry) &&
+                   (VME_OBJECT(entry) != NULL) &&
+                   (VME_OBJECT(entry) == object)) {
+                       int superpage;
+
+                       if (!object->pager_created &&
+                           object->phys_contiguous &&
+                           VME_OFFSET(entry) == 0 &&
+                           (entry->vme_end - entry->vme_start == object->vo_size) &&
+                           VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size-1))) {
+                               superpage = VM_MEM_SUPERPAGE;
+                       } else {
+                               superpage = 0;
+                       }
+
+                       if (superpage && physpage_p) {
+                               /* for vm_map_wire_and_extract() */
+                               *physpage_p = (ppnum_t)
+                                       ((((vm_map_offset_t)
+                                          object->vo_shadow_offset)
+                                         + VME_OFFSET(entry)
+                                         + (laddr - entry->vme_start))
+                                        >> PAGE_SHIFT);
+                       }
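/*
 * [Editor's sketch -- not part of this patch.  The superpage test above as a
 * standalone predicate: the entry must map an unpagered, physically
 * contiguous object in its entirety, starting at offset 0, with the start
 * address aligned to the object size (which must be a power of two for the
 * mask check to be meaningful):]
 */
#include <stdbool.h>
#include <stdint.h>

static bool
is_superpage_mapping(bool pager_created, bool phys_contiguous,
    uint64_t entry_offset, uint64_t entry_start, uint64_t entry_size,
    uint64_t object_size)
{
	return !pager_created &&
	    phys_contiguous &&
	    entry_offset == 0 &&
	    entry_size == object_size &&
	    (entry_start & (object_size - 1)) == 0;
}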
 
                        if (caller_pmap) {
                                /*
                                 * Set up a block mapped area
                                 */
-                               pmap_map_block(caller_pmap, 
-                                              (addr64_t)(caller_pmap_addr - ldelta), 
-                                              (((vm_map_offset_t) (entry->object.vm_object->shadow_offset)) +
-                                               entry->offset + (laddr - entry->vme_start) - ldelta) >> 12,
-                                              ((ldelta + hdelta) >> 12), prot, 
-                                              (VM_WIMG_MASK & (int)object->wimg_bits), 0);
-                       } else { 
+                               assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
+                               kr = pmap_map_block(caller_pmap,
+                                                   (addr64_t)(caller_pmap_addr - ldelta),
+                                                   (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
+                                                              VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
+                                                   (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
+                                                   (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
+
+                               if (kr != KERN_SUCCESS) {
+                                       goto cleanup;
+                               }
+                       } else {
                                /*
                                 * Set up a block mapped area
                                 */
-                               pmap_map_block(real_map->pmap, 
-                                              (addr64_t)(vaddr - ldelta), 
-                                              (((vm_map_offset_t)(entry->object.vm_object->shadow_offset)) +
-                                               entry->offset + (laddr - entry->vme_start) - ldelta) >> 12,
-                                              ((ldelta + hdelta) >> 12), prot, 
-                                              (VM_WIMG_MASK & (int)object->wimg_bits), 0);
+                               assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
+                               kr = pmap_map_block(real_map->pmap,
+                                                   (addr64_t)(vaddr - ldelta),
+                                                   (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
+                                                              VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
+                                                   (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
+                                                   (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
+
+                               if (kr != KERN_SUCCESS) {
+                                       goto cleanup;
+                               }
                        }
                }
        }
 
+       /*
+        * Success
+        */
+       kr = KERN_SUCCESS;
+
+       /*
+        * TODO: could most of the done cases just use cleanup?
+        */
+cleanup:
        /*
         * Unlock everything, and return
         */
-       vm_map_verify_done(map, &version);
+       vm_map_unlock_read(map);
        if (real_map != map)
                vm_map_unlock(real_map);
 
        if (m != VM_PAGE_NULL) {
+               assert(VM_PAGE_OBJECT(m) == m_object);
+
                PAGE_WAKEUP_DONE(m);
 
-               vm_fault_cleanup(m->object, top_page);
+               vm_fault_cleanup(m_object, top_page);
        } else
                vm_fault_cleanup(object, top_page);
 
@@ -3280,13 +5108,35 @@ handle_copy_delay:
 
 #undef RELEASE_PAGE
 
-       kr = KERN_SUCCESS;
 done:
        thread_interrupt_level(interruptible_state);
 
-       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
-                             (int)((uint64_t)vaddr >> 32),
-                             (int)vaddr,
+       /*
+        * Only apply the I/O throttle to faults that cause a pagein/swapin.
+        */
+       if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
+               throttle_lowpri_io(1);
+       } else {
+               if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
+
+                       if ((throttle_delay = vm_page_throttled(TRUE))) {
+
+                               if (vm_debug_events) {
+                                       if (type_of_fault == DBG_COMPRESSOR_FAULT)
+                                               VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+                                       else if (type_of_fault == DBG_COW_FAULT)
+                                               VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+                                       else
+                                               VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+                               }
+                               delay(throttle_delay);
+                       }
+               }
+       }
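/*
 * [Editor's sketch -- not part of this patch.  The throttle decision above,
 * flattened: faults that did real I/O (pagein/swapin) go through the
 * low-priority I/O throttle; other successful faults -- except free ones
 * like cache hits and guard faults -- may be delayed under memory pressure.
 * Hypothetical helpers throughout:]
 */
#include <stdbool.h>

extern bool is_pagein_or_swapin(int type);   /* pagein or swapin faults  */
extern bool is_free_fault(int type);         /* cache hit or guard fault */
extern unsigned int vm_pressure_delay(void); /* 0 when not throttled     */
extern void io_throttle(void);
extern void sleep_usec(unsigned int usec);

static void
throttle_after_fault(int type_of_fault, bool success)
{
	if (is_pagein_or_swapin(type_of_fault)) {
		io_throttle();                   /* cf. throttle_lowpri_io(1) */
	} else if (success && !is_free_fault(type_of_fault)) {
		unsigned int d = vm_pressure_delay();
		if (d != 0)
			sleep_usec(d);               /* cf. delay(throttle_delay) */
	}
}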
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+                             (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
+                             ((uint64_t)trace_vaddr >> 32),
+                             trace_vaddr,
                              kr,
                              type_of_fault,
                              0);
@@ -3303,19 +5153,21 @@ kern_return_t
 vm_fault_wire(
        vm_map_t        map,
        vm_map_entry_t  entry,
+       vm_prot_t       prot,
+       vm_tag_t        wire_tag,
        pmap_t          pmap,
-       vm_map_offset_t pmap_addr)
+       vm_map_offset_t pmap_addr,
+       ppnum_t         *physpage_p)
 {
-
-       register vm_map_offset_t        va;
-       register vm_map_offset_t        end_addr = entry->vme_end;
-       register kern_return_t  rc;
+       vm_map_offset_t va;
+       vm_map_offset_t end_addr = entry->vme_end;
+       kern_return_t   rc;
 
        assert(entry->in_transition);
 
-       if ((entry->object.vm_object != NULL) && 
-                       !entry->is_sub_map && 
-                       entry->object.vm_object->phys_contiguous) {
+       if ((VME_OBJECT(entry) != NULL) &&
+           !entry->is_sub_map &&
+           VME_OBJECT(entry)->phys_contiguous) {
                return KERN_SUCCESS;
        }
 
@@ -3325,7 +5177,7 @@ vm_fault_wire(
         *      page tables and such can be locked down as well.
         */
 
-       pmap_pageable(pmap, pmap_addr, 
+       pmap_pageable(pmap, pmap_addr,
                pmap_addr + (end_addr - entry->vme_start), FALSE);
 
        /*
@@ -3334,14 +5186,18 @@ vm_fault_wire(
         */
 
        for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
-               if ((rc = vm_fault_wire_fast(
-                       map, va, entry, pmap, 
-                       pmap_addr + (va - entry->vme_start)
-                       )) != KERN_SUCCESS) {
-                       rc = vm_fault(map, va, VM_PROT_NONE, TRUE, 
-                               (pmap == kernel_pmap) ? 
-                                       THREAD_UNINT : THREAD_ABORTSAFE, 
-                               pmap, pmap_addr + (va - entry->vme_start));
+               rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
+                                       pmap_addr + (va - entry->vme_start),
+                                       physpage_p);
+               if (rc != KERN_SUCCESS) {
+                       rc = vm_fault_internal(map, va, prot, TRUE, wire_tag,
+                                              ((pmap == kernel_pmap)
+                                               ? THREAD_UNINT
+                                               : THREAD_ABORTSAFE),
+                                              pmap,
+                                              (pmap_addr +
+                                               (va - entry->vme_start)),
+                                              physpage_p);
                        DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
                }
 
@@ -3350,7 +5206,7 @@ vm_fault_wire(
 
                        /* unwire wired pages */
                        tmp_entry.vme_end = va;
-                       vm_fault_unwire(map, 
+                       vm_fault_unwire(map,
                                &tmp_entry, FALSE, pmap, pmap_addr);
 
                        return rc;
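/*
 * [Editor's sketch -- not part of this patch.  vm_fault_wire() is
 * all-or-nothing: on the first page that fails to wire, it rolls back
 * exactly the prefix that succeeded (note the stack copy of the entry with
 * vme_end clamped to va above).  The generic shape, hypothetical names:]
 */
#include <stdint.h>

extern int  wire_one_page(uint64_t va);               /* 0 on success    */
extern void unwire_range(uint64_t start, uint64_t end);

static int
wire_range(uint64_t start, uint64_t end, uint64_t page_size)
{
	for (uint64_t va = start; va < end; va += page_size) {
		if (wire_one_page(va) != 0) {
			unwire_range(start, va);  /* only the wired prefix [start, va) */
			return -1;
		}
	}
	return 0;
}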
@@ -3372,13 +5228,13 @@ vm_fault_unwire(
        pmap_t          pmap,
        vm_map_offset_t pmap_addr)
 {
-       register vm_map_offset_t        va;
-       register vm_map_offset_t        end_addr = entry->vme_end;
+       vm_map_offset_t va;
+       vm_map_offset_t end_addr = entry->vme_end;
        vm_object_t             object;
        struct vm_object_fault_info fault_info;
+       unsigned int    unwired_pages;
 
-       object = (entry->is_sub_map)
-                       ? VM_OBJECT_NULL : entry->object.vm_object;
+       object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
 
        /*
         * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
@@ -3391,10 +5247,22 @@ vm_fault_unwire(
 
        fault_info.interruptible = THREAD_UNINT;
        fault_info.behavior = entry->behavior;
-       fault_info.user_tag = entry->alias;
-       fault_info.lo_offset = entry->offset;
-       fault_info.hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
+       fault_info.user_tag = VME_ALIAS(entry);
+       fault_info.pmap_options = 0;
+       if (entry->iokit_acct ||
+           (!entry->is_sub_map && !entry->use_pmap)) {
+               fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
+       }
+       fault_info.lo_offset = VME_OFFSET(entry);
+       fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
        fault_info.no_cache = entry->no_cache;
+       fault_info.stealth = TRUE;
+       fault_info.io_sync = FALSE;
+       fault_info.cs_bypass = FALSE;
+       fault_info.mark_zf_absent = FALSE;
+       fault_info.batch_pmap_op = FALSE;
+
+       unwired_pages = 0;
 
        /*
         *      Since the pages are wired down, we must be able to
@@ -3403,13 +5271,13 @@ vm_fault_unwire(
 
        for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
 
-               if (pmap) {
-                       pmap_change_wiring(pmap, 
-                                          pmap_addr + (va - entry->vme_start), FALSE);
-               }
                if (object == VM_OBJECT_NULL) {
-                       (void) vm_fault(map, va, VM_PROT_NONE, 
-                                       TRUE, THREAD_UNINT, pmap, pmap_addr);
+                       if (pmap) {
+                               pmap_change_wiring(pmap,
+                                                  pmap_addr + (va - entry->vme_start), FALSE);
+                       }
+                       (void) vm_fault(map, va, VM_PROT_NONE,
+                                       TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
                } else {
                        vm_prot_t       prot;
                        vm_page_t       result_page;
@@ -3417,7 +5285,13 @@ vm_fault_unwire(
                        vm_object_t     result_object;
                        vm_fault_return_t result;
 
-                       fault_info.cluster_size = end_addr - va;
+                       if (end_addr - va > (vm_size_t) -1) {
+                               /* 32-bit overflow */
+                               fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
+                       } else {
+                               fault_info.cluster_size = (vm_size_t) (end_addr - va);
+                               assert(fault_info.cluster_size == end_addr - va);
+                       }
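/*
 * [Editor's sketch -- not part of this patch.  The clamp above in isolation:
 * (vm_size_t)-1 is the type's maximum value, and (vm_size_t)(0 - PAGE_SIZE)
 * is the largest page-aligned value below it.  Assuming a 32-bit vm_size_t:]
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u
typedef uint32_t sketch_vm_size_t;      /* hypothetical 32-bit vm_size_t */

static sketch_vm_size_t
clamp_cluster_size(uint64_t len)
{
	if (len > (sketch_vm_size_t)-1)                      /* would truncate */
		return (sketch_vm_size_t)(0 - SKETCH_PAGE_SIZE); /* 0xFFFFF000    */
	return (sketch_vm_size_t)len;
}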
 
                        do {
                                prot = VM_PROT_NONE;
@@ -3427,13 +5301,16 @@ vm_fault_unwire(
                                XPR(XPR_VM_FAULT,
                                        "vm_fault_unwire -> vm_fault_page\n",
                                        0,0,0,0,0);
+                               result_page = VM_PAGE_NULL;
                                result = vm_fault_page(
                                        object,
-                                       entry->offset + (va - entry->vme_start),
+                                       (VME_OFFSET(entry) +
+                                        (va - entry->vme_start)),
                                        VM_PROT_NONE, TRUE,
+                                       FALSE, /* page not looked up */
                                        &prot, &result_page, &top_page,
                                        (int *)0,
-                                       NULL, map->no_zero_fill, 
+                                       NULL, map->no_zero_fill,
                                        FALSE, &fault_info);
                        } while (result == VM_FAULT_RETRY);
 
@@ -3443,27 +5320,55 @@ vm_fault_unwire(
                         * move on to the next one in case the remaining pages are mapped from
                         * different objects.  During a forced unmount, the object is terminated
                         * so the alive flag will be false if this happens.  A forced unmount will
-                        * will occur when an external disk is unplugged before the user does an 
+                        * occur when an external disk is unplugged before the user does an
                         * eject, so we don't want to panic in that situation.
                         */
 
                        if (result == VM_FAULT_MEMORY_ERROR && !object->alive)
                                continue;
 
+                       if (result == VM_FAULT_MEMORY_ERROR &&
+                           object == kernel_object) {
+                               /*
+                                * This must have been allocated with
+                                * KMA_KOBJECT and KMA_VAONLY and there's
+                                * no physical page at this offset.
+                                * We're done (no page to free).
+                                */
+                               assert(deallocate);
+                               continue;
+                       }
+
                        if (result != VM_FAULT_SUCCESS)
                                panic("vm_fault_unwire: failure");
 
-                       result_object = result_page->object;
+                       result_object = VM_PAGE_OBJECT(result_page);
 
                        if (deallocate) {
-                               assert(result_page->phys_page !=
+                               assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
                                       vm_page_fictitious_addr);
-                               pmap_disconnect(result_page->phys_page);
+                               pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
+                               if (VM_PAGE_WIRED(result_page)) {
+                                       unwired_pages++;
+                               }
                                VM_PAGE_FREE(result_page);
                        } else {
-                               vm_page_lockspin_queues();
-                               vm_page_unwire(result_page);
-                               vm_page_unlock_queues();
+                               if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr))
+                                       pmap_change_wiring(pmap,
+                                           pmap_addr + (va - entry->vme_start), FALSE);
+
+                               if (VM_PAGE_WIRED(result_page)) {
+                                       vm_page_lockspin_queues();
+                                       vm_page_unwire(result_page, TRUE);
+                                       vm_page_unlock_queues();
+                                       unwired_pages++;
+                               }
+                               if(entry->zero_wired_pages) {
+                                       pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
+                                       entry->zero_wired_pages = FALSE;
+                               }
+
                                PAGE_WAKEUP_DONE(result_page);
                        }
                        vm_fault_cleanup(result_object, top_page);
@@ -3476,9 +5381,12 @@ vm_fault_unwire(
         *      such may be unwired themselves.
         */
 
-       pmap_pageable(pmap, pmap_addr, 
+       pmap_pageable(pmap, pmap_addr,
                pmap_addr + (end_addr - entry->vme_start), TRUE);
 
+       if (kernel_object == object) {
+           vm_tag_update_size(fault_info.user_tag, -ptoa_64(unwired_pages));
+       }
 }
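
The unwired_pages counter introduced above lets the loop tally wired-to-unwired transitions and issue a single vm_tag_update_size() adjustment at the end, instead of touching the accounting on every page. A hedged sketch of that batch-accounting shape; page_is_wired, unwire_page, and tag_adjust_bytes are assumed helpers, not xnu calls.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

extern bool page_is_wired(size_t idx);                 /* assumption */
extern void unwire_page(size_t idx);                   /* assumption */
extern void tag_adjust_bytes(int tag, int64_t delta);  /* assumption */

static void
unwire_range(int tag, size_t npages)
{
	uint32_t unwired = 0;

	for (size_t i = 0; i < npages; i++) {
		if (page_is_wired(i)) {
			unwire_page(i);
			unwired++;      /* count now, account later */
		}
	}
	/* one negative adjustment for the whole range, mirroring
	 * vm_tag_update_size(tag, -ptoa_64(unwired_pages)) */
	tag_adjust_bytes(tag, -(int64_t) unwired * 4096);
}
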
 
 /*
@@ -3501,17 +5409,20 @@ vm_fault_unwire(
  *     other than the common case will return KERN_FAILURE, and the caller
  *     is expected to call vm_fault().
  */
-kern_return_t
+static kern_return_t
 vm_fault_wire_fast(
        __unused vm_map_t       map,
        vm_map_offset_t va,
+       __unused vm_prot_t       caller_prot,
+       vm_tag_t        wire_tag,
        vm_map_entry_t  entry,
-       pmap_t                  pmap,
-       vm_map_offset_t pmap_addr)
+       pmap_t          pmap,
+       vm_map_offset_t pmap_addr,
+       ppnum_t         *physpage_p)
 {
        vm_object_t             object;
        vm_object_offset_t      offset;
-       register vm_page_t      m;
+       vm_page_t               m;
        vm_prot_t               prot;
        thread_t                thread = current_thread();
        int                     type_of_fault;
@@ -3530,7 +5441,7 @@ vm_fault_wire_fast(
 #define RELEASE_PAGE(m)        {                               \
        PAGE_WAKEUP_DONE(m);                            \
        vm_page_lockspin_queues();                      \
-       vm_page_unwire(m);                              \
+       vm_page_unwire(m, TRUE);                        \
        vm_page_unlock_queues();                        \
 }
 
@@ -3559,15 +5470,17 @@ vm_fault_wire_fast(
        /*
         *      If this entry is not directly to a vm_object, bail out.
         */
-       if (entry->is_sub_map)
+       if (entry->is_sub_map) {
+               assert(physpage_p == NULL);
                return(KERN_FAILURE);
+       }
 
        /*
         *      Find the backing store object and offset into it.
         */
 
-       object = entry->object.vm_object;
-       offset = (va - entry->vme_start) + entry->offset;
+       object = VME_OBJECT(entry);
+       offset = (va - entry->vme_start) + VME_OFFSET(entry);
        prot = entry->protection;
 
        /*
@@ -3596,19 +5509,15 @@ vm_fault_wire_fast(
        /*
         *      Look for page in top-level object.  If it's not there or
         *      there's something going on, give up.
-        * ENCRYPTED SWAP: use the slow fault path, since we'll need to
-        * decrypt the page before wiring it down.
         */
        m = vm_page_lookup(object, offset);
-       if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) ||
+       if ((m == VM_PAGE_NULL) || (m->busy) ||
            (m->unusual && ( m->error || m->restart || m->absent))) {
 
                GIVE_UP;
        }
-       ASSERT_PAGE_DECRYPTED(m);
-
        if (m->fictitious &&
-           m->phys_page == vm_page_guard_addr) {
+           VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
                /*
                 * Guard pages are fictitious pages and are never
                 * entered into a pmap, so let's say it's been wired...
@@ -3619,11 +5528,11 @@ vm_fault_wire_fast(
 
        /*
         *      Wire the page down now.  All bail outs beyond this
-        *      point must unwire the page.  
+        *      point must unwire the page.
         */
 
        vm_page_lockspin_queues();
-       vm_page_wire(m);
+       vm_page_wire(m, wire_tag, TRUE);
        vm_page_unlock_queues();
 
        /*
@@ -3649,16 +5558,43 @@ vm_fault_wire_fast(
                            pmap,
                            pmap_addr,
                            prot,
-                           TRUE,
-                           FALSE,
-                           FALSE,
+                           prot,
+                           TRUE,  /* wired */
+                           FALSE, /* change_wiring */
+                           wire_tag,
+                           FALSE, /* no_cache */
+                           FALSE, /* cs_bypass */
+                           VME_ALIAS(entry),
+                           ((entry->iokit_acct ||
+                             (!entry->is_sub_map && !entry->use_pmap))
+                            ? PMAP_OPTIONS_ALT_ACCT
+                            : 0),
+                           NULL,
                            &type_of_fault);
+       if (kr != KERN_SUCCESS) {
+               RELEASE_PAGE(m);
+               GIVE_UP;
+       }
 
 done:
        /*
         *      Unlock everything, and return
         */
 
+       if (physpage_p) {
+               /* for vm_map_wire_and_extract() */
+               if (kr == KERN_SUCCESS) {
+                       assert(object == VM_PAGE_OBJECT(m));
+                       *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
+                       if (prot & VM_PROT_WRITE) {
+                               vm_object_lock_assert_exclusive(object);
+                               m->dirty = TRUE;
+                       }
+               } else {
+                       *physpage_p = 0;
+               }
+       }
+
        PAGE_WAKEUP_DONE(m);
        UNLOCK_AND_DEALLOCATE;
 
@@ -3672,35 +5608,38 @@ done:
  *             Release a page used by vm_fault_copy.
  */
 
-void
+static void
 vm_fault_copy_cleanup(
        vm_page_t       page,
        vm_page_t       top_page)
 {
-       vm_object_t     object = page->object;
+       vm_object_t     object = VM_PAGE_OBJECT(page);
 
        vm_object_lock(object);
        PAGE_WAKEUP_DONE(page);
-       vm_page_lockspin_queues();
-       if (!page->active && !page->inactive && !page->throttled)
-               vm_page_activate(page);
-       vm_page_unlock_queues();
+       if ( !VM_PAGE_PAGEABLE(page)) {
+               vm_page_lockspin_queues();
+               if ( !VM_PAGE_PAGEABLE(page)) {
+                       vm_page_activate(page);
+               }
+               vm_page_unlock_queues();
+       }
        vm_fault_cleanup(object, top_page);
 }
 
-void
+static void
 vm_fault_copy_dst_cleanup(
        vm_page_t       page)
 {
        vm_object_t     object;
 
        if (page != VM_PAGE_NULL) {
-               object = page->object;
+               object = VM_PAGE_OBJECT(page);
                vm_object_lock(object);
                vm_page_lockspin_queues();
-               vm_page_unwire(page);
+               vm_page_unwire(page, TRUE);
                vm_page_unlock_queues();
-               vm_object_paging_end(object);   
+               vm_object_paging_end(object);
                vm_object_unlock(object);
        }
 }
@@ -3744,7 +5683,7 @@ vm_fault_copy(
        int                     interruptible)
 {
        vm_page_t               result_page;
-       
+
        vm_page_t               src_page;
        vm_page_t               src_top_page;
        vm_prot_t               src_prot;
@@ -3755,7 +5694,9 @@ vm_fault_copy(
 
        vm_map_size_t           amount_left;
        vm_object_t             old_copy_object;
+       vm_object_t             result_page_object = NULL;
        kern_return_t           error = 0;
+       vm_fault_return_t       result;
 
        vm_map_size_t           part_size;
        struct vm_object_fault_info fault_info_src;
@@ -3777,16 +5718,28 @@ vm_fault_copy(
        fault_info_src.interruptible = interruptible;
        fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
        fault_info_src.user_tag  = 0;
+       fault_info_src.pmap_options = 0;
        fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
        fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
        fault_info_src.no_cache   = FALSE;
+       fault_info_src.stealth = TRUE;
+       fault_info_src.io_sync = FALSE;
+       fault_info_src.cs_bypass = FALSE;
+       fault_info_src.mark_zf_absent = FALSE;
+       fault_info_src.batch_pmap_op = FALSE;
 
        fault_info_dst.interruptible = interruptible;
        fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
        fault_info_dst.user_tag  = 0;
+       fault_info_dst.pmap_options = 0;
        fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
        fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
        fault_info_dst.no_cache   = FALSE;
+       fault_info_dst.stealth = TRUE;
+       fault_info_dst.io_sync = FALSE;
+       fault_info_dst.cs_bypass = FALSE;
+       fault_info_dst.mark_zf_absent = FALSE;
+       fault_info_dst.batch_pmap_op = FALSE;
 
        do { /* while (amount_left > 0) */
                /*
@@ -3803,18 +5756,27 @@ vm_fault_copy(
                vm_object_lock(dst_object);
                vm_object_paging_begin(dst_object);
 
-               fault_info_dst.cluster_size = amount_left;
+               if (amount_left > (vm_size_t) -1) {
+                       /* 32-bit overflow */
+                       fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
+               } else {
+                       fault_info_dst.cluster_size = (vm_size_t) amount_left;
+                       assert(fault_info_dst.cluster_size == amount_left);
+               }
 
                XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
-               switch (vm_fault_page(dst_object,
-                                     vm_object_trunc_page(dst_offset),
-                                     VM_PROT_WRITE|VM_PROT_READ,
-                                     FALSE,
-                                     &dst_prot, &dst_page, &dst_top_page,
-                                     (int *)0,
-                                     &error,
-                                     dst_map->no_zero_fill,
-                                     FALSE, &fault_info_dst)) {
+               dst_page = VM_PAGE_NULL;
+               result = vm_fault_page(dst_object,
+                                      vm_object_trunc_page(dst_offset),
+                                      VM_PROT_WRITE|VM_PROT_READ,
+                                      FALSE,
+                                      FALSE, /* page not looked up */
+                                      &dst_prot, &dst_page, &dst_top_page,
+                                      (int *)0,
+                                      &error,
+                                      dst_map->no_zero_fill,
+                                      FALSE, &fault_info_dst);
+               switch (result) {
                case VM_FAULT_SUCCESS:
                        break;
                case VM_FAULT_RETRY:
@@ -3825,15 +5787,24 @@ vm_fault_copy(
                        /* fall thru */
                case VM_FAULT_INTERRUPTED:
                        RETURN(MACH_SEND_INTERRUPTED);
+               case VM_FAULT_SUCCESS_NO_VM_PAGE:
+                       /* success but no VM page: fail the copy */
+                       vm_object_paging_end(dst_object);
+                       vm_object_unlock(dst_object);
+                       /*FALLTHROUGH*/
                case VM_FAULT_MEMORY_ERROR:
                        if (error)
                                return (error);
                        else
                                return(KERN_MEMORY_ERROR);
+               default:
+                       panic("vm_fault_copy: unexpected error 0x%x from "
+                             "vm_fault_page()\n", result);
                }
                assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
 
-               old_copy_object = dst_page->object->copy;
+               assert(dst_object == VM_PAGE_OBJECT(dst_page));
+               old_copy_object = dst_object->copy;
 
                /*
                 * There exists the possibility that the source and
@@ -3842,16 +5813,16 @@ vm_fault_copy(
                 * same, the call to vm_fault_page() for the
                 * destination page will deadlock.  To prevent this we
                 * wire the page so we can drop busy without having
-                * the page daemon steal the page.  We clean up the 
+                * the page daemon steal the page.  We clean up the
                 * top page but keep the paging reference on the object
                 * holding the dest page so it doesn't go away.
                 */
 
                vm_page_lockspin_queues();
-               vm_page_wire(dst_page);
+               vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
                vm_page_unlock_queues();
                PAGE_WAKEUP_DONE(dst_page);
-               vm_object_unlock(dst_page->object);
+               vm_object_unlock(dst_object);
 
                if (dst_top_page != VM_PAGE_NULL) {
                        vm_object_lock(dst_object);
@@ -3880,20 +5851,29 @@ vm_fault_copy(
                                src_prot = VM_PROT_READ;
                                vm_object_paging_begin(src_object);
 
-                               fault_info_src.cluster_size = amount_left;
+                               if (amount_left > (vm_size_t) -1) {
+                                       /* 32-bit overflow */
+                                       fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
+                               } else {
+                                       fault_info_src.cluster_size = (vm_size_t) amount_left;
+                                       assert(fault_info_src.cluster_size == amount_left);
+                               }
 
                                XPR(XPR_VM_FAULT,
                                        "vm_fault_copy(2) -> vm_fault_page\n",
                                        0,0,0,0,0);
-                               switch (vm_fault_page(
-                                               src_object, 
-                                               vm_object_trunc_page(src_offset),
-                                               VM_PROT_READ, FALSE,
-                                               &src_prot, 
-                                               &result_page, &src_top_page,
-                                               (int *)0, &error, FALSE,
-                                               FALSE, &fault_info_src)) {
-
+                               result_page = VM_PAGE_NULL;
+                               result = vm_fault_page(
+                                       src_object,
+                                       vm_object_trunc_page(src_offset),
+                                       VM_PROT_READ, FALSE,
+                                       FALSE, /* page not looked up */
+                                       &src_prot,
+                                       &result_page, &src_top_page,
+                                       (int *)0, &error, FALSE,
+                                       FALSE, &fault_info_src);
+
+                               switch (result) {
                                case VM_FAULT_SUCCESS:
                                        break;
                                case VM_FAULT_RETRY:
@@ -3905,40 +5885,53 @@ vm_fault_copy(
                                case VM_FAULT_INTERRUPTED:
                                        vm_fault_copy_dst_cleanup(dst_page);
                                        RETURN(MACH_SEND_INTERRUPTED);
+                               case VM_FAULT_SUCCESS_NO_VM_PAGE:
+                                       /* success but no VM page: fail */
+                                       vm_object_paging_end(src_object);
+                                       vm_object_unlock(src_object);
+                                       /*FALLTHROUGH*/
                                case VM_FAULT_MEMORY_ERROR:
                                        vm_fault_copy_dst_cleanup(dst_page);
                                        if (error)
                                                return (error);
                                        else
                                                return(KERN_MEMORY_ERROR);
+                               default:
+                                       panic("vm_fault_copy(2): unexpected "
+                                             "error 0x%x from "
+                                             "vm_fault_page()\n", result);
                                }
 
-
+                               result_page_object = VM_PAGE_OBJECT(result_page);
                                assert((src_top_page == VM_PAGE_NULL) ==
-                                      (result_page->object == src_object));
+                                      (result_page_object == src_object));
                        }
                        assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE);
-                       vm_object_unlock(result_page->object);
+                       vm_object_unlock(result_page_object);
                }
 
+               vm_map_lock_read(dst_map);
+
                if (!vm_map_verify(dst_map, dst_version)) {
+                       vm_map_unlock_read(dst_map);
                        if (result_page != VM_PAGE_NULL && src_page != dst_page)
                                vm_fault_copy_cleanup(result_page, src_top_page);
                        vm_fault_copy_dst_cleanup(dst_page);
                        break;
                }
+               assert(dst_object == VM_PAGE_OBJECT(dst_page));
 
-               vm_object_lock(dst_page->object);
+               vm_object_lock(dst_object);
 
-               if (dst_page->object->copy != old_copy_object) {
-                       vm_object_unlock(dst_page->object);
-                       vm_map_verify_done(dst_map, dst_version);
+               if (dst_object->copy != old_copy_object) {
+                       vm_object_unlock(dst_object);
+                       vm_map_unlock_read(dst_map);
                        if (result_page != VM_PAGE_NULL && src_page != dst_page)
                                vm_fault_copy_cleanup(result_page, src_top_page);
                        vm_fault_copy_dst_cleanup(dst_page);
                        break;
                }
-               vm_object_unlock(dst_page->object);
+               vm_object_unlock(dst_object);
 
                /*
                 *      Copy the page, and note that it is dirty
@@ -3965,15 +5958,24 @@ vm_fault_copy(
                        }
 
                        if (result_page == VM_PAGE_NULL) {
+                               assert((vm_offset_t) dst_po == dst_po);
+                               assert((vm_size_t) part_size == part_size);
                                vm_page_part_zero_fill(dst_page,
-                                                       dst_po, part_size);
+                                                      (vm_offset_t) dst_po,
+                                                      (vm_size_t) part_size);
                        } else {
-                               vm_page_part_copy(result_page, src_po,
-                                       dst_page, dst_po, part_size);
+                               assert((vm_offset_t) src_po == src_po);
+                               assert((vm_offset_t) dst_po == dst_po);
+                               assert((vm_size_t) part_size == part_size);
+                               vm_page_part_copy(result_page,
+                                                 (vm_offset_t) src_po,
+                                                 dst_page,
+                                                 (vm_offset_t) dst_po,
+                                                 (vm_size_t)part_size);
                                if(!dst_page->dirty){
                                        vm_object_lock(dst_object);
-                                       dst_page->dirty = TRUE;
-                                       vm_object_unlock(dst_page->object);
+                                       SET_PAGE_DIRTY(dst_page, TRUE);
+                                       vm_object_unlock(dst_object);
                                }
 
                        }
@@ -3983,11 +5985,14 @@ vm_fault_copy(
                        if (result_page == VM_PAGE_NULL)
                                vm_page_zero_fill(dst_page);
                        else{
+                               vm_object_lock(result_page_object);
                                vm_page_copy(result_page, dst_page);
+                               vm_object_unlock(result_page_object);
+
                                if(!dst_page->dirty){
                                        vm_object_lock(dst_object);
-                                       dst_page->dirty = TRUE;
-                                       vm_object_unlock(dst_page->object);
+                                       SET_PAGE_DIRTY(dst_page, TRUE);
+                                       vm_object_unlock(dst_object);
                                }
                        }
 
@@ -3997,7 +6002,7 @@ vm_fault_copy(
                 *      Unlock everything, and return
                 */
 
-               vm_map_verify_done(dst_map, dst_version);
+               vm_map_unlock_read(dst_map);
 
                if (result_page != VM_PAGE_NULL && src_page != dst_page)
                        vm_fault_copy_cleanup(result_page, src_top_page);
@@ -4011,7 +6016,7 @@ vm_fault_copy(
        RETURN(KERN_SUCCESS);
 #undef RETURN
 
-       /*NOTREACHED*/  
+       /*NOTREACHED*/
 }
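
vm_fault_copy() as a whole follows an optimistic protocol: do the faulting and copying with the destination map unlocked, then re-take the map read lock, check the saved map version, and redo the pass if the map changed underneath (this diff replaces vm_map_verify_done() with a plain vm_map_unlock_read() on every exit path). A minimal sketch of that snapshot/verify/retry loop under assumed types and helpers:

#include <stdbool.h>
#include <stdint.h>

struct sk_map { uint64_t version; };

extern void map_lock_read(struct sk_map *m);    /* assumption */
extern void map_unlock_read(struct sk_map *m);  /* assumption */
extern bool do_copy_step(struct sk_map *m);     /* assumption */

static void
copy_with_verify(struct sk_map *m)
{
	for (;;) {
		/* snapshot taken while the map was last locked
		 * in the real code */
		uint64_t v = m->version;

		/* ... long-running fault/copy work, map unlocked ... */

		map_lock_read(m);
		if (m->version == v && do_copy_step(m)) {
			map_unlock_read(m);
			return;          /* map unchanged: commit */
		}
		map_unlock_read(m);      /* map changed: redo the lookup */
	}
}
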
 
 #if    VM_FAULT_CLASSIFY
@@ -4044,7 +6049,7 @@ vm_fault_classify(vm_object_t             object,
 
        while (TRUE) {
                m = vm_page_lookup(object, offset);
-               if (m != VM_PAGE_NULL) {                
+               if (m != VM_PAGE_NULL) {
                        if (m->busy || m->error || m->restart || m->absent) {
                                type = VM_FAULT_TYPE_OTHER;
                                break;
@@ -4052,7 +6057,7 @@ vm_fault_classify(vm_object_t             object,
                        if (((fault_type & VM_PROT_WRITE) == 0) ||
                            ((level == 0) && object->copy == VM_OBJECT_NULL)) {
                                type = VM_FAULT_TYPE_MAP_IN;
-                               break;  
+                               break;
                        }
                        type = VM_FAULT_TYPE_COPY;
                        break;
@@ -4067,7 +6072,7 @@ vm_fault_classify(vm_object_t             object,
                                break;
                        }
 
-                       offset += object->shadow_offset;
+                       offset += object->vo_shadow_offset;
                        object = object->shadow;
                        level++;
                        continue;
@@ -4099,8 +6104,199 @@ vm_fault_classify_init(void)
 }
 #endif /* VM_FAULT_CLASSIFY */
 
+vm_offset_t
+kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
+{
+       vm_map_entry_t  entry;
+       vm_object_t     object;
+       vm_offset_t     object_offset;
+       vm_page_t       m;
+       int             compressor_external_state, compressed_count_delta;
+       int             compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
+       int             my_fault_type = VM_PROT_READ;
+       kern_return_t   kr;
+
+       if (not_in_kdp) {
+               panic("kdp_lightweight_fault called from outside of debugger context");
+       }
+
+       assert(map != VM_MAP_NULL);
+
+       assert((cur_target_addr & PAGE_MASK) == 0);
+       if ((cur_target_addr & PAGE_MASK) != 0) {
+               return 0;
+       }
+
+       if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
+               return 0;
+       }
+
+       if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
+               return 0;
+       }
+
+       if (entry->is_sub_map) {
+               return 0;
+       }
+
+       object = VME_OBJECT(entry);
+       if (object == VM_OBJECT_NULL) {
+               return 0;
+       }
+
+       object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
+
+       while (TRUE) {
+               if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
+                       return 0;
+               }
+
+               if (object->pager_created && (object->paging_in_progress ||
+                       object->activity_in_progress)) {
+                       return 0;
+               }
+
+               m = kdp_vm_page_lookup(object, object_offset);
+
+               if (m != VM_PAGE_NULL) {
+
+                       if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
+                               return 0;
+                       }
+
+                       if (m->laundry || m->busy || m->free_when_done || m->absent || m->error || m->cleaning ||
+                               m->overwriting || m->restart || m->unusual) {
+                               return 0;
+                       }
+
+                       assert(!m->private);
+                       if (m->private) {
+                               return 0;
+                       }
+
+                       assert(!m->fictitious);
+                       if (m->fictitious) {
+                               return 0;
+                       }
+
+                       assert(m->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
+                       if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+                               return 0;
+                       }
+
+                       return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
+               }
+
+               compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
+
+               if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
+                       if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
+                               kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset),
+                                                               kdp_compressor_decompressed_page_ppnum, &my_fault_type,
+                                                               compressor_flags, &compressed_count_delta);
+                               if (kr == KERN_SUCCESS) {
+                                       return kdp_compressor_decompressed_page_paddr;
+                               } else {
+                                       return 0;
+                               }
+                       }
+               }
+
+               if (object->shadow == VM_OBJECT_NULL) {
+                       return 0;
+               }
+
+               object_offset += object->vo_shadow_offset;
+               object = object->shadow;
+       }
+
+}
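
kdp_lightweight_fault() resolves an address from debugger context without taking locks or blocking: if the page is not resident in the current object, the offset is biased by vo_shadow_offset and the search descends to the shadow object, bailing out with 0 at the first sign of instability. A standalone sketch of that shadow-chain walk (sobj and lookup_phys_page are illustrative, not xnu types):

#include <stddef.h>
#include <stdint.h>

struct sobj {
	struct sobj *shadow;         /* backing object, or NULL */
	uint64_t     shadow_offset;  /* bias applied when descending */
};

/* assumption: returns the physical address, or 0 if not resident */
extern uint64_t lookup_phys_page(struct sobj *o, uint64_t off);

static uint64_t
chain_lookup(struct sobj *o, uint64_t off)
{
	while (o != NULL) {
		uint64_t pa = lookup_phys_page(o, off);
		if (pa != 0) {
			return pa;           /* resident at this level */
		}
		off += o->shadow_offset;     /* translate into the shadow */
		o = o->shadow;
	}
	return 0;                            /* nothing in the chain */
}
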
+
+void
+vm_page_validate_cs_mapped(
+       vm_page_t       page,
+       const void      *kaddr)
+{
+       vm_object_t             object;
+       vm_object_offset_t      offset;
+       memory_object_t         pager;
+       struct vnode            *vnode;
+       boolean_t               validated;
+       unsigned                tainted;
+
+       assert(page->busy);
+       object = VM_PAGE_OBJECT(page);
+       vm_object_lock_assert_exclusive(object);
+
+       if (page->wpmapped && !page->cs_tainted) {
+               /*
+                * This page was mapped for "write" access sometime in the
+                * past and could still be modifiable in the future.
+                * Consider it tainted.
+                * [ If the page was already found to be "tainted", no
+                * need to re-validate. ]
+                */
+               page->cs_validated = TRUE;
+               page->cs_tainted = TRUE;
+               if (cs_debug) {
+                       printf("CODESIGNING: vm_page_validate_cs: "
+                              "page %p obj %p off 0x%llx "
+                              "was modified\n",
+                              page, object, page->offset);
+               }
+               vm_cs_validated_dirtied++;
+       }
+
+       if (page->cs_validated || page->cs_tainted) {
+               return;
+       }
+
+       vm_cs_validates++;
+
+       assert(object->code_signed);
+       offset = page->offset;
+
+       if (!object->alive || object->terminating || object->pager == NULL) {
+               /*
+                * The object is terminating and we don't have its pager
+                * so we can't validate the data...
+                */
+               return;
+       }
+       /*
+        * Since we get here to validate a page that was brought in by
+        * the pager, we know that this pager is all set up and ready
+        * by now.
+        */
+       assert(!object->internal);
+       assert(object->pager != NULL);
+       assert(object->pager_ready);
+
+       pager = object->pager;
+       assert(object->paging_in_progress);
+       vnode = vnode_pager_lookup_vnode(pager);
+
+       /* verify the SHA1 hash for this page */
+       tainted = 0;
+       validated = cs_validate_range(vnode,
+                                     pager,
+                                     (object->paging_offset +
+                                      offset),
+                                     (const void *)((const char *)kaddr),
+                                     PAGE_SIZE_64,
+                                     &tainted);
+
+       if (tainted & CS_VALIDATE_TAINTED) {
+               page->cs_tainted = TRUE;
+       }
+       if (tainted & CS_VALIDATE_NX) {
+               page->cs_nx = TRUE;
+       }
 
-extern int cs_validation;
+       if (validated) {
+               page->cs_validated = TRUE;
+       }
+}
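
The tail of vm_page_validate_cs_mapped() folds the validator's result mask into sticky per-page attributes; nothing on this path ever clears cs_tainted or cs_nx once set. A sketch of that one-way flag translation, using stand-in constants for CS_VALIDATE_TAINTED and CS_VALIDATE_NX (the real values live in the codesign headers):

#include <stdbool.h>

#define SK_TAINTED  0x1  /* stand-in for CS_VALIDATE_TAINTED */
#define SK_NX       0x2  /* stand-in for CS_VALIDATE_NX */

struct sk_page {
	bool cs_validated;
	bool cs_tainted;
	bool cs_nx;
};

static void
apply_cs_result(struct sk_page *p, bool validated, unsigned tainted)
{
	if (tainted & SK_TAINTED) {
		p->cs_tainted = true;    /* sticky: never cleared here */
	}
	if (tainted & SK_NX) {
		p->cs_nx = true;
	}
	if (validated) {
		p->cs_validated = true;
	}
}
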
 
 void
 vm_page_validate_cs(
@@ -4112,19 +6308,52 @@ vm_page_validate_cs(
        vm_map_size_t           ksize;
        vm_offset_t             kaddr;
        kern_return_t           kr;
-       memory_object_t         pager;
-       void                    *blobs;
-       boolean_t               validated, tainted;
        boolean_t               busy_page;
+       boolean_t               need_unmap;
+
+       object = VM_PAGE_OBJECT(page);
+       vm_object_lock_assert_held(object);
+
+       if (page->wpmapped && !page->cs_tainted) {
+               vm_object_lock_assert_exclusive(object);
+
+               /*
+                * This page was mapped for "write" access sometime in the
+                * past and could still be modifiable in the future.
+                * Consider it tainted.
+                * [ If the page was already found to be "tainted", no
+                * need to re-validate. ]
+                */
+               page->cs_validated = TRUE;
+               page->cs_tainted = TRUE;
+               if (cs_debug) {
+                       printf("CODESIGNING: vm_page_validate_cs: "
+                              "page %p obj %p off 0x%llx "
+                              "was modified\n",
+                              page, object, page->offset);
+               }
+               vm_cs_validated_dirtied++;
+       }
+
+       if (page->cs_validated || page->cs_tainted) {
+               return;
+       }
 
-       vm_object_lock_assert_exclusive(page->object);
-       assert(!page->cs_validated);
+       if (page->slid) {
+               panic("vm_page_validate_cs(%p): page is slid\n", page);
+       }
+       assert(!page->slid);
 
-       if (!cs_validation) {
+#if CHECK_CS_VALIDATION_BITMAP
+       if ( vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page(page->offset + object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) {
+               page->cs_validated = TRUE;
+               page->cs_tainted = FALSE;
+               vm_cs_bitmap_validated++;
                return;
        }
+#endif
+       vm_object_lock_assert_exclusive(object);
 
-       object = page->object;
        assert(object->code_signed);
        offset = page->offset;
 
@@ -4133,7 +6362,7 @@ vm_page_validate_cs(
                /* keep page busy while we map (and unlock) the VM object */
                page->busy = TRUE;
        }
-       
+
        /*
         * Take a paging reference on the VM object
         * to protect it from collapse or bypass,
@@ -4142,64 +6371,38 @@ vm_page_validate_cs(
        vm_object_paging_begin(object);
 
        /* map the page in the kernel address space */
-       koffset = 0;
        ksize = PAGE_SIZE_64;
-       kr = vm_paging_map_object(&koffset,
-                                 page,
+       koffset = 0;
+       need_unmap = FALSE;
+       kr = vm_paging_map_object(page,
                                  object,
                                  offset,
+                                 VM_PROT_READ,
+                                 FALSE, /* can't unlock object ! */
                                  &ksize,
-                                 FALSE); /* can't unlock object ! */
+                                 &koffset,
+                                 &need_unmap);
        if (kr != KERN_SUCCESS) {
                panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
        }
        kaddr = CAST_DOWN(vm_offset_t, koffset);
 
-       /*
-        * Since we get here to validate a page that was brought in by
-        * the pager, we know that this pager is all setup and ready
-        * by now.
-        */
-       assert(!object->internal);
-       assert(object->pager != NULL);
-       assert(object->pager_ready);
-
-       if (!object->alive || object->terminating || object->pager == NULL) {
-               /*
-                * The object is terminating and we don't have its pager
-                * so we can't validate the data...
-                */
-               goto out;
-       }
-
-       pager = object->pager;
-       assert(pager != NULL);
+       /* validate the mapped page */
+       vm_page_validate_cs_mapped(page, (const void *) kaddr);
 
-       kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
-       if (kr != KERN_SUCCESS) {
-               blobs = NULL;
+#if CHECK_CS_VALIDATION_BITMAP
+       if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) {
+               vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET );
        }
-
-       /* verify the SHA1 hash for this page */
-       validated = cs_validate_page(blobs,
-                                    offset + object->paging_offset,
-                                    (const void *)kaddr,
-                                    &tainted);
-
+#endif
        assert(page->busy);
-       assert(object == page->object);
+       assert(object == VM_PAGE_OBJECT(page));
        vm_object_lock_assert_exclusive(object);
 
-       page->cs_validated = validated;
-       if (validated) {
-               page->cs_tainted = tainted;
-       }
-
-out:
        if (!busy_page) {
                PAGE_WAKEUP_DONE(page);
        }
-       if (koffset != 0) {
+       if (need_unmap) {
                /* unmap the map from the kernel address space */
                vm_paging_unmap_object(object, koffset, koffset + ksize);
                koffset = 0;
@@ -4208,3 +6411,71 @@ out:
        }
        vm_object_paging_end(object);
 }
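
vm_page_validate_cs() now gets an explicit need_unmap flag back from vm_paging_map_object() and only tears down the kernel mapping it actually created, instead of inferring that from a nonzero koffset. A hedged sketch of the resulting map/validate/conditional-unmap shape, with hypothetical helpers:

#include <stdbool.h>
#include <stdint.h>

extern int  map_page(uint64_t *kaddr_out, bool *need_unmap_out); /* assumption */
extern void unmap_page(uint64_t kaddr);                          /* assumption */
extern void validate_mapped(uint64_t kaddr);                     /* assumption */

static void
validate_one(void)
{
	uint64_t kaddr = 0;
	bool     need_unmap = false;

	if (map_page(&kaddr, &need_unmap) != 0) {
		return;                 /* could not map: nothing to do */
	}
	validate_mapped(kaddr);
	if (need_unmap) {
		unmap_page(kaddr);      /* undo only a mapping we made */
	}
}
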
+
+void
+vm_page_validate_cs_mapped_chunk(
+       vm_page_t       page,
+       const void      *kaddr,
+       vm_offset_t     chunk_offset,
+       vm_size_t       chunk_size,
+       boolean_t       *validated_p,
+       unsigned        *tainted_p)
+{
+       vm_object_t             object;
+       vm_object_offset_t      offset, offset_in_page;
+       memory_object_t         pager;
+       struct vnode            *vnode;
+       boolean_t               validated;
+       unsigned                tainted;
+
+       *validated_p = FALSE;
+       *tainted_p = 0;
+
+       assert(page->busy);
+       object = VM_PAGE_OBJECT(page);
+       vm_object_lock_assert_exclusive(object);
+
+       assert(object->code_signed);
+       offset = page->offset;
+
+       if (!object->alive || object->terminating || object->pager == NULL) {
+               /*
+                * The object is terminating and we don't have its pager
+                * so we can't validate the data...
+                */
+               return;
+       }
+       /*
+        * Since we get here to validate a page that was brought in by
+        * the pager, we know that this pager is all set up and ready
+        * by now.
+        */
+       assert(!object->internal);
+       assert(object->pager != NULL);
+       assert(object->pager_ready);
+
+       pager = object->pager;
+       assert(object->paging_in_progress);
+       vnode = vnode_pager_lookup_vnode(pager);
+
+       /* verify the signature for this chunk */
+       offset_in_page = chunk_offset;
+       assert(offset_in_page < PAGE_SIZE);
+
+       tainted = 0;
+       validated = cs_validate_range(vnode,
+                                     pager,
+                                     (object->paging_offset +
+                                      offset +
+                                      offset_in_page),
+                                     (const void *)((const char *)kaddr
+                                                   + offset_in_page),
+                                     chunk_size,
+                                     &tainted);
+       if (validated) {
+               *validated_p = TRUE;
+       }
+       if (tainted) {
+               *tainted_p = tainted;
+       }
+}
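
The chunk variant validates a sub-page range: the absolute file offset handed to cs_validate_range() is the pager offset plus the page's offset in the object plus the chunk's offset within the page. A tiny sketch of that arithmetic in plain C, with illustrative names:

#include <assert.h>
#include <stdint.h>

#define SK_PAGE_SIZE 4096u

static uint64_t
chunk_file_offset(uint64_t paging_offset,  /* object->paging_offset */
                  uint64_t page_offset,    /* page->offset */
                  uint32_t chunk_offset)   /* offset within the page */
{
	assert(chunk_offset < SK_PAGE_SIZE);
	return paging_offset + page_offset + chunk_offset;
}
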