diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c
index ae31a40380aa7d577d45b9ad2f980456cd326b2c..abbe202ff351edadda29587a00d4e9865c33b311 100644
--- a/osfmk/vm/vm_fault.c
+++ b/osfmk/vm/vm_fault.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
 #include <sys/reason.h>
 #include <sys/signalvar.h>
 
-#include <libsa/sys/timers.h>  /* for struct timespec */
+#include <san/kasan.h>
 
 #define VM_FAULT_CLASSIFY      0
 
 unsigned int   vm_object_pagein_throttle = 16;
 
 /*
- * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which 
+ * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which
  * kicks in when swap space runs out.  64-bit programs have massive address spaces and can leak enormous amounts
  * of memory if they're buggy and can run the system completely out of swap space.  If this happens, we
  * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
- * keep the UI active so that the user has a chance to kill the offending task before the system 
+ * keep the UI active so that the user has a chance to kill the offending task before the system
  * completely hangs.
  *
  * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
@@ -139,11 +139,12 @@ uint64_t vm_hard_throttle_threshold;
 
 
 #define NEED_TO_HARD_THROTTLE_THIS_TASK()      (vm_wants_task_throttled(current_task()) ||     \
-                                                (vm_page_free_count < vm_page_throttle_limit && \
-                                                 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) > THROTTLE_LEVEL_THROTTLED))
+                                                ((vm_page_free_count < vm_page_throttle_limit || \
+                                                  HARD_THROTTLE_LIMIT_REACHED()) && \
+                                                 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED))
 
 
-#define HARD_THROTTLE_DELAY    5000    /* 5000 us == 5 ms */
+#define HARD_THROTTLE_DELAY    10000   /* 10000 us == 10 ms */
 #define SOFT_THROTTLE_DELAY    200     /* 200 us == .2 ms */
 
 #define        VM_PAGE_CREATION_THROTTLE_PERIOD_SECS   6
@@ -157,6 +158,7 @@ static kern_return_t vm_fault_wire_fast(
                                vm_map_t        map,
                                vm_map_offset_t va,
                                vm_prot_t       prot,
+                               vm_tag_t        wire_tag,
                                vm_map_entry_t  entry,
                                pmap_t          pmap,
                                vm_map_offset_t pmap_addr,
@@ -167,6 +169,7 @@ static kern_return_t vm_fault_internal(
                vm_map_offset_t vaddr,
                vm_prot_t       caller_prot,
                boolean_t       change_wiring,
+               vm_tag_t        wire_tag,
                int             interruptible,
                pmap_t          pmap,
                vm_map_offset_t pmap_addr,
@@ -195,6 +198,10 @@ unsigned long vm_cs_revalidates = 0;
 unsigned long vm_cs_query_modified = 0;
 unsigned long vm_cs_validated_dirtied = 0;
 unsigned long vm_cs_bitmap_validated = 0;
+#if PMAP_CS
+uint64_t vm_cs_defer_to_pmap_cs = 0;
+uint64_t vm_cs_defer_to_pmap_cs_not = 0;
+#endif /* PMAP_CS */
 
 void vm_pre_fault(vm_map_offset_t);
 
@@ -202,6 +209,24 @@ extern char *kdp_compressor_decompressed_page;
 extern addr64_t        kdp_compressor_decompressed_page_paddr;
 extern ppnum_t kdp_compressor_decompressed_page_ppnum;
 
+struct vmrtfr {
+       int vmrtfr_maxi;
+       int vmrtfr_curi;
+       int64_t vmrtf_total;
+       vm_rtfault_record_t *vm_rtf_records;
+} vmrtfrs;
+#define VMRTF_DEFAULT_BUFSIZE (4096)
+#define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
+int vmrtf_num_records = VMRTF_NUM_RECORDS_DEFAULT;
+
+static void vm_rtfrecord_lock(void);
+static void vm_rtfrecord_unlock(void);
+static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
+
+lck_spin_t vm_rtfr_slock;
+extern lck_grp_t vm_page_lck_grp_bucket;
+extern lck_attr_t vm_page_lck_attr;
+
 /*
  *     Routine:        vm_fault_init
  *     Purpose:
@@ -229,7 +254,7 @@ vm_fault_init(void)
 
        if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) {
                for ( i = 0; i < VM_PAGER_MAX_MODES; i++) {
-                       if (vm_compressor_temp > 0 && 
+                       if (vm_compressor_temp > 0 &&
                            ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) {
                                need_default_val = FALSE;
                                vm_compressor_mode = vm_compressor_temp;
@@ -238,28 +263,25 @@ vm_fault_init(void)
                }
                if (need_default_val)
                        printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
-       } 
+       }
        if (need_default_val) {
                /* If no boot arg or incorrect boot arg, try device tree. */
                PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
        }
-       PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count));
-
-       if (PE_parse_boot_argn("vm_compressor_immediate", &vm_compressor_temp, sizeof (vm_compressor_temp)))
-               vm_compressor_immediate_preferred_override = TRUE;
-       else {
-               if (PE_get_default("kern.vm_compressor_immediate", &vm_compressor_temp, sizeof(vm_compressor_temp)))
-                       vm_compressor_immediate_preferred_override = TRUE;
-       }
-       if (vm_compressor_immediate_preferred_override == TRUE) {
-               if (vm_compressor_temp)
-                       vm_compressor_immediate_preferred = TRUE;
-               else
-                       vm_compressor_immediate_preferred = FALSE;
-       }
        printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
 }
 
+void vm_rtfault_record_init(void) {
+       PE_parse_boot_argn("vm_rtfault_records", &vmrtf_num_records, sizeof(vmrtf_num_records));
+
+       assert(vmrtf_num_records >= 1);
+       vmrtf_num_records = MAX(vmrtf_num_records, 1);
+       size_t kallocsz = vmrtf_num_records * sizeof(vm_rtfault_record_t);
+       vmrtfrs.vm_rtf_records = kalloc(kallocsz);
+       bzero(vmrtfrs.vm_rtf_records, kallocsz);
+       vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
+       lck_spin_init(&vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+}
 /*
  *     Routine:        vm_fault_cleanup
  *     Purpose:
@@ -292,30 +314,12 @@ vm_fault_cleanup(
        }
 }
 
-#if    MACH_CLUSTER_STATS
-#define MAXCLUSTERPAGES 16
-struct {
-       unsigned long pages_in_cluster;
-       unsigned long pages_at_higher_offsets;
-       unsigned long pages_at_lower_offsets;
-} cluster_stats_in[MAXCLUSTERPAGES];
-#define CLUSTER_STAT(clause)   clause
-#define CLUSTER_STAT_HIGHER(x) \
-       ((cluster_stats_in[(x)].pages_at_higher_offsets)++)
-#define CLUSTER_STAT_LOWER(x)  \
-        ((cluster_stats_in[(x)].pages_at_lower_offsets)++)
-#define CLUSTER_STAT_CLUSTER(x)        \
-       ((cluster_stats_in[(x)].pages_in_cluster)++)
-#else  /* MACH_CLUSTER_STATS */
-#define CLUSTER_STAT(clause)
-#endif /* MACH_CLUSTER_STATS */
-
 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
 
 
 boolean_t      vm_page_deactivate_behind = TRUE;
-/* 
- * default sizes given VM_BEHAVIOR_DEFAULT reference behavior 
+/*
+ * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
  */
 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW    128
 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER   16              /* don't make this too big... */
@@ -540,7 +544,7 @@ vm_fault_deactivate_behind(
         for (n = 0; n < max_pages_in_run; n++) {
                m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
 
-               if (m && !m->laundry && !m->busy && !m->no_cache && (m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->fictitious && !m->absent) {
+               if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache && (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->vmp_fictitious && !m->vmp_absent) {
                        page_run[pages_in_run++] = m;
 
                        /*
@@ -591,15 +595,15 @@ vm_page_throttled(boolean_t page_kept)
         clock_sec_t     elapsed_sec;
         clock_sec_t     tv_sec;
         clock_usec_t    tv_usec;
-       
+
        thread_t thread = current_thread();
-       
+
        if (thread->options & TH_OPT_VMPRIV)
                return (0);
 
        if (thread->t_page_creation_throttled) {
                thread->t_page_creation_throttled = 0;
-               
+
                if (page_kept == FALSE)
                        goto no_throttle;
        }
@@ -613,7 +617,7 @@ vm_page_throttled(boolean_t page_kept)
 
        if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
            thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
-               
+
                if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
 #if (DEVELOPMENT || DEBUG)
                        OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
@@ -634,13 +638,13 @@ vm_page_throttled(boolean_t page_kept)
                                 * over a long period of time a chance to get out of
                                 * the throttled state... we reset the counter and timestamp
                                 * so that if it stays under the rate limit for the next second
-                                * it will be back in our good graces... if it exceeds it, it 
+                                * it will be back in our good graces... if it exceeds it, it
                                 * will remain in the throttled state
                                 */
                                thread->t_page_creation_time = tv_sec;
                                thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
                        }
-                       ++vm_page_throttle_count;
+                       VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
 
                        thread->t_page_creation_throttled = 1;
 
@@ -674,10 +678,10 @@ no_throttle:
  * cleanup is based on being called from vm_fault_page
  *
  * object must be locked
- * object == m->object
+ * object == m->vmp_object
  */
 static vm_fault_return_t
-vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state, boolean_t page_throttle)
+vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
 {
        int throttle_delay;
 
@@ -698,26 +702,6 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t int
 
                return (VM_FAULT_MEMORY_ERROR);
        }
-       if (vm_backing_store_low) {
-               /*
-                * are we protecting the system from
-                * backing store exhaustion.  If so
-                * sleep unless we are privileged.
-                */
-               if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {
-
-                       if (m != VM_PAGE_NULL)
-                               VM_PAGE_FREE(m);
-                       vm_fault_cleanup(object, first_m);
-
-                       assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);
-
-                       thread_block(THREAD_CONTINUE_NULL);
-                       thread_interrupt_level(interruptible_state);
-
-                       return (VM_FAULT_RETRY);
-               }
-       }
        if (page_throttle == TRUE) {
                if ((throttle_delay = vm_page_throttled(FALSE))) {
                        /*
@@ -749,7 +733,7 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t int
  * do the work to zero fill a page and
  * inject it into the correct paging queue
  *
- * m->object must be locked
+ * m->vmp_object must be locked
  * page queue lock must NOT be held
  */
 static int
@@ -772,19 +756,19 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
         * execution.  i.e. it is the responsibility
         * of higher layers to call for an instruction
         * sync after changing the contents and before
-        * sending a program into this area.  We 
+        * sending a program into this area.  We
         * choose this approach for performance
         */
-       m->pmapped = TRUE;
+       m->vmp_pmapped = TRUE;
 
-       m->cs_validated = FALSE;
-       m->cs_tainted = FALSE;
-       m->cs_nx = FALSE;
+       m->vmp_cs_validated = FALSE;
+       m->vmp_cs_tainted = FALSE;
+       m->vmp_cs_nx = FALSE;
 
        if (no_zero_fill == TRUE) {
                my_fault = DBG_NZF_PAGE_FAULT;
 
-               if (m->absent && m->busy)
+               if (m->vmp_absent && m->vmp_busy)
                        return (my_fault);
        } else {
                vm_page_zero_fill(m);
@@ -792,9 +776,9 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
                VM_STAT_INCR(zero_fill_count);
                DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
        }
-       assert(!m->laundry);
+       assert(!m->vmp_laundry);
        assert(object != kernel_object);
-       //assert(m->pageq.next == 0 && m->pageq.prev == 0);
+       //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
 
        if (!VM_DYNAMIC_PAGING_ENABLED() &&
                (object->purgable == VM_PURGABLE_DENY ||
@@ -812,8 +796,8 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
                         */
                        vm_page_queues_remove(m, TRUE);
                        vm_page_check_pageable_safe(m);
-                       vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
-                       m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
+                       vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, vmp_pageq);
+                       m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
                        vm_page_throttled_count++;
                }
                vm_page_unlock_queues();
@@ -832,7 +816,7 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
  *             The required permissions for the page is given
  *             in "fault_type".  Desired permissions are included
  *             in "protection".
- *             fault_info is passed along to determine pagein cluster 
+ *             fault_info is passed along to determine pagein cluster
  *             limits... it contains the expected reference pattern,
  *             cluster size if available, etc...
  *
@@ -864,7 +848,7 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
  *             The "result_page" is also left busy.  It is not removed
  *             from the pageout queues.
  *     Special Case:
- *             A return value of VM_FAULT_SUCCESS_NO_PAGE means that the 
+ *             A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
  *             fault succeeded but there's no VM page (i.e. the VM object
  *             does not actually hold VM pages, but device memory or
  *             large pages).  The object is still locked and we still hold a
@@ -892,7 +876,7 @@ vm_fault_page(
        /* More arguments: */
        kern_return_t   *error_code,    /* code if page is in error */
        boolean_t       no_zero_fill,   /* don't zero fill absent pages */
-       boolean_t       data_supply,    /* treat as data_supply if 
+       boolean_t       data_supply,    /* treat as data_supply if
                                         * it is a write fault and a full
                                         * page is provided */
        vm_object_fault_info_t fault_info)
@@ -907,10 +891,8 @@ vm_fault_page(
        boolean_t               force_fault_retry = FALSE;
        vm_prot_t               access_required = fault_type;
        vm_prot_t               wants_copy_flag;
-       CLUSTER_STAT(int pages_at_higher_offsets;)
-       CLUSTER_STAT(int pages_at_lower_offsets;)
        kern_return_t           wait_result;
-       boolean_t               interruptible_state;
+       wait_interrupt_t        interruptible_state;
        boolean_t               data_already_requested = FALSE;
        vm_behavior_t           orig_behavior;
        vm_size_t               orig_cluster_size;
@@ -924,10 +906,10 @@ vm_fault_page(
        int                     grab_options;
 
 /*
- * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is 
+ * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
  * marked as paged out in the compressor pager or the pager doesn't exist.
- * Note also that if the pager for an internal object 
- * has not been created, the pager is not invoked regardless of the value 
+ * Note also that if the pager for an internal object
+ * has not been created, the pager is not invoked regardless of the value
  * of MUST_ASK_PAGER().
  *
  * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
@@ -965,7 +947,7 @@ vm_fault_page(
 
        interruptible = fault_info->interruptible;
        interruptible_state = thread_interrupt_level(interruptible);
+
        /*
         *      INVARIANTS (through entire routine):
         *
@@ -1082,7 +1064,7 @@ vm_fault_page(
 #endif
                if (m != VM_PAGE_NULL) {
 
-                       if (m->busy) {
+                       if (m->vmp_busy) {
                                /*
                                 * The page is being brought in,
                                 * wait for it and then retry.
@@ -1109,10 +1091,10 @@ vm_fault_page(
                                }
                                continue;
                        }
-                       if (m->laundry) {
-                               m->free_when_done = FALSE;
+                       if (m->vmp_laundry) {
+                               m->vmp_free_when_done = FALSE;
 
-                               if (!m->cleaning) 
+                               if (!m->vmp_cleaning)
                                        vm_pageout_steal_laundry(m, FALSE);
                        }
                        if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
@@ -1126,7 +1108,7 @@ vm_fault_page(
                                         * be just to wire or unwire it.
                                         * Let's pretend it succeeded...
                                         */
-                                       m->busy = TRUE;
+                                       m->vmp_busy = TRUE;
                                        *result_page = m;
                                        assert(first_m == VM_PAGE_NULL);
                                        *top_page = first_m;
@@ -1145,7 +1127,7 @@ vm_fault_page(
                                }
                        }
 
-                       if (m->error) {
+                       if (m->vmp_error) {
                                /*
                                 * The page is in error, give up now.
                                 */
@@ -1161,7 +1143,7 @@ vm_fault_page(
 
                                return (VM_FAULT_MEMORY_ERROR);
                        }
-                       if (m->restart) {
+                       if (m->vmp_restart) {
                                /*
                                 * The pager wants us to restart
                                 * at the top of the chain,
@@ -1178,7 +1160,7 @@ vm_fault_page(
 
                                return (VM_FAULT_RETRY);
                        }
-                       if (m->absent) {
+                       if (m->vmp_absent) {
                                /*
                                 * The page isn't busy, but is absent,
                                 * therefore it's deemed "unavailable".
@@ -1204,7 +1186,7 @@ vm_fault_page(
                                        /*
                                         * check for any conditions that prevent
                                         * us from creating a new zero-fill page
-                                        * vm_fault_check will do all of the 
+                                        * vm_fault_check will do all of the
                                         * fault cleanup in the case of an error condition
                                         * including resetting the thread_interrupt_level
                                         */
@@ -1232,7 +1214,7 @@ vm_fault_page(
                                                vm_object_unlock(object);
 
                                                /*
-                                                * grab the original page we 
+                                                * grab the original page we
                                                 * 'soldered' in place and
                                                 * retake lock on 'first_object'
                                                 */
@@ -1248,11 +1230,11 @@ vm_fault_page(
                                                 * we're going to use the absent page we just found
                                                 * so convert it to a 'busy' page
                                                 */
-                                               m->absent = FALSE;
-                                               m->busy = TRUE;
+                                               m->vmp_absent = FALSE;
+                                               m->vmp_busy = TRUE;
                                        }
                                        if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
-                                               m->absent = TRUE;
+                                               m->vmp_absent = TRUE;
                                        /*
                                         * zero-fill the page and put it on
                                         * the correct paging queue
@@ -1268,8 +1250,8 @@ vm_fault_page(
                                                VM_PAGE_FREE(m);
                                        } else {
                                                first_m = m;
-                                               m->absent = FALSE;
-                                               m->busy = TRUE;
+                                               m->vmp_absent = FALSE;
+                                               m->vmp_busy = TRUE;
 
                                                vm_page_lockspin_queues();
                                                vm_page_queues_remove(m, FALSE);
@@ -1290,7 +1272,7 @@ vm_fault_page(
                                        vm_object_unlock(object);
                                        object = next_object;
                                        vm_object_paging_begin(object);
-                                       
+
                                        /*
                                         * reset to default type of fault
                                         */
@@ -1299,7 +1281,7 @@ vm_fault_page(
                                        continue;
                                }
                        }
-                       if ((m->cleaning)
+                       if ((m->vmp_cleaning)
                            && ((object != first_object) || (object->copy != VM_OBJECT_NULL))
                            && (fault_type & VM_PROT_WRITE)) {
                                /*
@@ -1325,14 +1307,14 @@ vm_fault_page(
                                vm_object_reference_locked(object);
 
                                vm_fault_cleanup(object, first_m);
-                               
+
                                counter(c_vm_fault_page_block_backoff_kernel++);
                                vm_object_lock(object);
                                assert(object->ref_count > 0);
 
                                m = vm_page_lookup(object, offset);
 
-                               if (m != VM_PAGE_NULL && m->cleaning) {
+                               if (m != VM_PAGE_NULL && m->vmp_cleaning) {
                                        PAGE_ASSERT_WAIT(m, interruptible);
 
                                        vm_object_unlock(object);
@@ -1349,14 +1331,14 @@ vm_fault_page(
                                        return (VM_FAULT_RETRY);
                                }
                        }
-                       if (type_of_fault == NULL && (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
+                       if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
                            !(fault_info != NULL && fault_info->stealth)) {
                                /*
                                 * If we were passed a non-NULL pointer for
                                 * "type_of_fault", than we came from
                                 * vm_fault... we'll let it deal with
                                 * this condition, since it
-                                * needs to see m->speculative to correctly
+                                * needs to see m->vmp_speculative to correctly
                                 * account the pageins, otherwise...
                                 * take it off the speculative queue, we'll
                                 * let the caller of vm_fault_page deal
@@ -1367,35 +1349,12 @@ vm_fault_page(
                                 * the page in the speculative queue.
                                 */
                                vm_page_lockspin_queues();
-                               if (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+                               if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q)
                                        vm_page_queues_remove(m, FALSE);
                                vm_page_unlock_queues();
                        }
                        assert(object == VM_PAGE_OBJECT(m));
 
-                       if (m->encrypted) {
-                               /*
-                                * ENCRYPTED SWAP:
-                                * the user needs access to a page that we
-                                * encrypted before paging it out.
-                                * Decrypt the page now.
-                                * Keep it busy to prevent anyone from
-                                * accessing it during the decryption.
-                                */
-                               m->busy = TRUE;
-                               vm_page_decrypt(m, 0);
-                               assert(m->busy);
-                               PAGE_WAKEUP_DONE(m);
-
-                               /*
-                                * Retry from the top, in case
-                                * something changed while we were
-                                * decrypting.
-                                */
-                               continue;
-                       }
-                       ASSERT_PAGE_DECRYPTED(m);
-
                        if (object->code_signed) {
                                /*
                                 * CODE SIGNING:
@@ -1421,13 +1380,13 @@ vm_fault_page(
                        XPR(XPR_VM_FAULT,
                            "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
                                object, offset, m, 0, 0);
-                       assert(!m->busy);
-                       assert(!m->absent);
+                       assert(!m->vmp_busy);
+                       assert(!m->vmp_absent);
 
-                       m->busy = TRUE;
+                       m->vmp_busy = TRUE;
                        break;
                }
-               
+
 
                /*
                 * we get here when there is no page present in the object at
@@ -1436,6 +1395,7 @@ vm_fault_page(
                 * this object can provide the data or we're the top object...
                 * object is locked;  m == NULL
                 */
+
                if (must_be_resident) {
                        if (fault_type == VM_PROT_NONE &&
                            object == kernel_object) {
@@ -1454,10 +1414,14 @@ vm_fault_page(
 
                        goto dont_look_for_page;
                }
+
+               /* Don't expect to fault pages into the kernel object. */
+               assert(object != kernel_object);
+
                data_supply = FALSE;
 
                look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply);
-               
+
 #if TRACEFAULTPAGE
                dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);      /* (TEST/DEBUG) */
 #endif
@@ -1589,16 +1553,16 @@ vm_fault_page(
                                                return (VM_FAULT_MEMORY_SHORTAGE);
                                        }
 
-                                       m->absent = TRUE;
+                                       m->vmp_absent = TRUE;
                                        if (fault_info && fault_info->batch_pmap_op == TRUE) {
                                                vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
                                        } else {
                                                vm_page_insert(m, object, offset);
                                        }
                                }
-                               assert(m->busy);
-                                       
-                               m->absent = TRUE;
+                               assert(m->vmp_busy);
+
+                               m->vmp_absent = TRUE;
                                pager = object->pager;
 
                                assert(object->paging_in_progress > 0);
@@ -1638,8 +1602,8 @@ vm_fault_page(
 
                                switch (rc) {
                                case KERN_SUCCESS:
-                                       m->absent = FALSE;
-                                       m->dirty = TRUE;
+                                       m->vmp_absent = FALSE;
+                                       m->vmp_dirty = TRUE;
                                        if ((object->wimg_bits &
                                             VM_WIMG_MASK) !=
                                            VM_WIMG_USE_DEFAULT) {
@@ -1652,7 +1616,7 @@ vm_fault_page(
                                                pmap_sync_page_attributes_phys(
                                                        VM_PAGE_GET_PHYS_PAGE(m));
                                        } else {
-                                               m->written_by_kernel = TRUE;
+                                               m->vmp_written_by_kernel = TRUE;
                                        }
 
                                        /*
@@ -1663,27 +1627,28 @@ vm_fault_page(
                                         * "compressed purgeable" ledger, so
                                         * update that now.
                                         */
-                                       if ((object->purgable !=
-                                            VM_PURGABLE_DENY) &&
-                                           (object->vo_purgeable_owner !=
+                                       if (((object->purgable !=
+                                             VM_PURGABLE_DENY) ||
+                                            object->vo_ledger_tag) &&
+                                           (object->vo_owner !=
                                             NULL)) {
                                                /*
                                                 * One less compressed
-                                                * purgeable page.
+                                                * purgeable/tagged page.
                                                 */
-                                               vm_purgeable_compressed_update(
+                                               vm_object_owner_compressed_update(
                                                        object,
                                                        -1);
                                        }
 
                                        break;
                                case KERN_MEMORY_FAILURE:
-                                       m->unusual = TRUE;
-                                       m->error = TRUE;
-                                       m->absent = FALSE;
+                                       m->vmp_unusual = TRUE;
+                                       m->vmp_error = TRUE;
+                                       m->vmp_absent = FALSE;
                                        break;
                                case KERN_MEMORY_ERROR:
-                                       assert(m->absent);
+                                       assert(m->vmp_absent);
                                        break;
                                default:
                                        panic("vm_fault_page(): unexpected "
@@ -1697,7 +1662,7 @@ vm_fault_page(
                                goto data_requested;
                        }
                        my_fault_type = DBG_PAGEIN_FAULT;
-               
+
                        if (m != VM_PAGE_NULL) {
                                VM_PAGE_FREE(m);
                                m = VM_PAGE_NULL;
@@ -1709,7 +1674,7 @@ vm_fault_page(
 
                        /*
                         * It's possible someone called vm_object_destroy while we weren't
-                        * holding the object lock.  If that has happened, then bail out 
+                        * holding the object lock.  If that has happened, then bail out
                         * here.
                         */
 
@@ -1726,6 +1691,10 @@ vm_fault_page(
                         * so we can release the object lock.
                         */
 
+                       if (object->object_is_shared_cache) {
+                               set_thread_rwlock_boost();
+                       }
+
                        vm_object_unlock(object);
 
                        /*
@@ -1769,7 +1738,7 @@ vm_fault_page(
                                 */
                                assert(first_m != VM_PAGE_NULL);
                                assert(VM_PAGE_OBJECT(first_m) == first_object);
-                                       
+
                                vm_object_lock(first_object);
                                VM_PAGE_FREE(first_m);
                                vm_object_paging_end(first_object);
@@ -1810,6 +1779,10 @@ vm_fault_page(
 #endif
                        vm_object_lock(object);
 
+                       if (object->object_is_shared_cache) {
+                               clear_thread_rwlock_boost();
+                       }
+
                data_requested:
                        if (rc != KERN_SUCCESS) {
 
@@ -1846,7 +1819,7 @@ vm_fault_page(
                        if (m == VM_PAGE_NULL && object->phys_contiguous) {
                                /*
                                 * No page here means that the object we
-                                * initially looked up was "physically 
+                                * initially looked up was "physically
                                 * contiguous" (i.e. device memory).  However,
                                 * with Virtual VRAM, the object might not
                                 * be backed by that device memory anymore,
@@ -1876,10 +1849,10 @@ vm_fault_page(
                }
 dont_look_for_page:
                /*
-                * We get here if the object has no pager, or an existence map 
+                * We get here if the object has no pager, or an existence map
                 * exists and indicates the page isn't present on the pager
                 * or we're unwiring a page.  If a pager exists, but there
-                * is no existence map, then the m->absent case above handles
+                * is no existence map, then the m->vmp_absent case above handles
                 * the ZF case when the pager can't provide the page
                 */
 #if TRACEFAULTPAGE
@@ -1919,7 +1892,7 @@ dont_look_for_page:
                        /*
                         * check for any conditions that prevent
                         * us from creating a new zero-fill page
-                        * vm_fault_check will do all of the 
+                        * vm_fault_check will do all of the
                         * fault cleanup in the case of an error condition
                         * including resetting the thread_interrupt_level
                         */
@@ -1940,7 +1913,7 @@ dont_look_for_page:
                                vm_page_insert(m, object, offset);
                        }
                        if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
-                               m->absent = TRUE;
+                               m->vmp_absent = TRUE;
 
                        my_fault = vm_fault_zero_page(m, no_zero_fill);
 
@@ -1989,19 +1962,12 @@ dont_look_for_page:
        dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
 #endif
 #if    EXTRA_ASSERTIONS
-       assert(m->busy && !m->absent);
+       assert(m->vmp_busy && !m->vmp_absent);
        assert((first_m == VM_PAGE_NULL) ||
-              (first_m->busy && !first_m->absent &&
-               !first_m->active && !first_m->inactive && !first_m->secluded));
+              (first_m->vmp_busy && !first_m->vmp_absent &&
+               !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
 #endif /* EXTRA_ASSERTIONS */
 
-       /*
-        * ENCRYPTED SWAP:
-        * If we found a page, we must have decrypted it before we
-        * get here...
-        */
-       ASSERT_PAGE_DECRYPTED(m);
-
        XPR(XPR_VM_FAULT,
            "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
                object, offset, m,
@@ -2027,25 +1993,6 @@ dont_look_for_page:
                         */
                        assert(!must_be_resident);
 
-                       /*
-                        * are we protecting the system from
-                        * backing store exhaustion.  If so
-                        * sleep unless we are privileged.
-                        */
-                       if (vm_backing_store_low) {
-                               if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {
-
-                                       RELEASE_PAGE(m);
-                                       vm_fault_cleanup(object, first_m);
-
-                                       assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);
-
-                                       thread_block(THREAD_CONTINUE_NULL);
-                                       thread_interrupt_level(interruptible_state);
-
-                                       return (VM_FAULT_RETRY);
-                               }
-                       }
                        /*
                         * If we try to collapse first_object at this
                         * point, we may deadlock when we try to get
@@ -2094,14 +2041,14 @@ dont_look_for_page:
                         * access to this page, then we could
                         * avoid the pmap_disconnect() call.
                         */
-                       if (m->pmapped)
+                       if (m->vmp_pmapped)
                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 
-                       if (m->clustered) {
+                       if (m->vmp_clustered) {
                                VM_PAGE_COUNT_AS_PAGEIN(m);
                                VM_PAGE_CONSUME_CLUSTERED(m);
                        }
-                       assert(!m->cleaning);
+                       assert(!m->vmp_cleaning);
 
                        /*
                         * We no longer need the old page or object.
@@ -2136,12 +2083,12 @@ dont_look_for_page:
                         */
                        VM_PAGE_FREE(first_m);
                        first_m = VM_PAGE_NULL;
-                       
+
                        /*
                         * and replace it with the
                         * page we just copied into
                         */
-                       assert(copy_m->busy);
+                       assert(copy_m->vmp_busy);
                        vm_page_insert(copy_m, object, offset);
                        SET_PAGE_DIRTY(copy_m, TRUE);
 
@@ -2151,8 +2098,8 @@ dont_look_for_page:
                         * way, let's try to collapse the top object.
                         * But we have to play ugly games with
                         * paging_in_progress to do that...
-                        */     
-                       vm_object_paging_end(object); 
+                        */
+                       vm_object_paging_end(object);
                        vm_object_collapse(object, offset, TRUE);
                        vm_object_paging_begin(object);
 
@@ -2227,7 +2174,7 @@ dont_look_for_page:
                        /*
                         * Page currently exists in the copy object
                         */
-                       if (copy_m->busy) {
+                       if (copy_m->vmp_busy) {
                                /*
                                 * If the page is being brought
                                 * in, wait for it and then retry.
@@ -2249,13 +2196,8 @@ dont_look_for_page:
                                copy_object->ref_count--;
                                assert(copy_object->ref_count > 0);
                                copy_m = vm_page_lookup(copy_object, copy_offset);
-                               /*
-                                * ENCRYPTED SWAP:
-                                * it's OK if the "copy_m" page is encrypted,
-                                * because we're not moving it nor handling its
-                                * contents.
-                                */
-                               if (copy_m != VM_PAGE_NULL && copy_m->busy) {
+
+                               if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
                                        PAGE_ASSERT_WAIT(copy_m, interruptible);
 
                                        vm_object_unlock(copy_object);
@@ -2281,32 +2223,7 @@ dont_look_for_page:
                         * for example) or it hasn't been paged out.
                         * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
                         * We must copy the page to the copy object.
-                        */
-
-                       if (vm_backing_store_low) {
-                               /*
-                                * we are protecting the system from
-                                * backing store exhaustion.  If so
-                                * sleep unless we are privileged.
-                                */
-                               if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {
-                                       assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);
-
-                                       RELEASE_PAGE(m);
-                                       VM_OBJ_RES_DECR(copy_object);
-                                       vm_object_lock_assert_exclusive(copy_object);
-                                       copy_object->ref_count--;
-                                       assert(copy_object->ref_count > 0);
-
-                                       vm_object_unlock(copy_object);
-                                       vm_fault_cleanup(object, first_m);
-                                       thread_block(THREAD_CONTINUE_NULL);
-                                       thread_interrupt_level(interruptible_state);
-
-                                       return (VM_FAULT_RETRY);
-                               }
-                       }
-                       /*
+                        *
                         * Allocate a page for the copy
                         */
                        copy_m = vm_page_alloc(copy_object, copy_offset);
@@ -2329,17 +2246,17 @@ dont_look_for_page:
                         * Must copy page into copy-object.
                         */
                        vm_page_copy(m, copy_m);
-                       
+
                        /*
                         * If the old page was in use by any users
                         * of the copy-object, it must be removed
                         * from all pmaps.  (We can't know which
                         * pmaps use it.)
                         */
-                       if (m->pmapped)
+                       if (m->vmp_pmapped)
                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 
-                       if (m->clustered) {
+                       if (m->vmp_clustered) {
                                VM_PAGE_COUNT_AS_PAGEIN(m);
                                VM_PAGE_CONSUME_CLUSTERED(m);
                        }
@@ -2353,7 +2270,7 @@ dont_look_for_page:
                           ) {
 
                                vm_page_lockspin_queues();
-                               assert(!m->cleaning);
+                               assert(!m->vmp_cleaning);
                                vm_page_activate(copy_m);
                                vm_page_unlock_queues();
 
@@ -2361,9 +2278,9 @@ dont_look_for_page:
                                PAGE_WAKEUP_DONE(copy_m);
 
                        } else {
-                               
-                               assert(copy_m->busy == TRUE);
-                               assert(!m->cleaning);
+
+                               assert(copy_m->vmp_busy == TRUE);
+                               assert(!m->vmp_cleaning);
 
                                /*
                                 * dirty is protected by the object lock
@@ -2416,8 +2333,8 @@ dont_look_for_page:
                         * wait result].  Can't turn off the page's
                         * busy bit because we're not done with it.
                         */
-                       if (m->wanted) {
-                               m->wanted = FALSE;
+                       if (m->vmp_wanted) {
+                               m->vmp_wanted = FALSE;
                                thread_wakeup_with_result((event_t) m, THREAD_RESTART);
                        }
                }
@@ -2432,7 +2349,7 @@ dont_look_for_page:
                copy_object->ref_count--;
                assert(copy_object->ref_count > 0);
 
-               VM_OBJ_RES_DECR(copy_object);   
+               VM_OBJ_RES_DECR(copy_object);
                vm_object_unlock(copy_object);
 
                break;
@@ -2466,8 +2383,20 @@ done:
                         * state being up to date
                         */
                        vm_fault_is_sequential(object, offset, fault_info->behavior);
+                       vm_fault_deactivate_behind(object, offset, fault_info->behavior);
 
+               } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
+                       /*
+                        * we weren't called from vm_fault, so handle the
+                        * accounting here for hits in the cache
+                        */
+                       if (m->vmp_clustered) {
+                               VM_PAGE_COUNT_AS_PAGEIN(m);
+                               VM_PAGE_CONSUME_CLUSTERED(m);
+                       }
+                       vm_fault_is_sequential(object, offset, fault_info->behavior);
                        vm_fault_deactivate_behind(object, offset, fault_info->behavior);
+
                } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
 
                        VM_STAT_INCR(decompressions);
@@ -2509,16 +2438,16 @@ backoff:
  */
 #define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj)              \
        ((pmap) != kernel_pmap /*1*/ &&                                 \
-        !(page)->cs_tainted /*2*/ &&                                   \
+        !(page)->vmp_cs_tainted /*2*/ &&                                       \
         (page_obj)->code_signed /*3*/ &&                                       \
-        (!(page)->cs_validated || (page)->wpmapped /*4*/))
+        (!(page)->vmp_cs_validated || (page)->vmp_wpmapped /*4*/))
 
 
 /*
  * page queue lock must NOT be held
- * m->object must be locked
+ * m->vmp_object must be locked
  *
- * NOTE: m->object could be locked "shared" only if we are called
+ * NOTE: m->vmp_object could be locked "shared" only if we are called
  * from vm_fault() as part of a soft fault.  If so, we must be
  * careful not to modify the VM object in any way that is not
  * legal under a shared lock...
@@ -2536,29 +2465,38 @@ vm_fault_enter(vm_page_t m,
               vm_prot_t caller_prot,
               boolean_t wired,
               boolean_t change_wiring,
-              boolean_t no_cache,
-              boolean_t cs_bypass,
-              __unused int      user_tag,
-              int       pmap_options,
+              vm_tag_t  wire_tag,
+              vm_object_fault_info_t fault_info,
               boolean_t *need_retry,
               int *type_of_fault)
 {
        kern_return_t   kr, pe_result;
-       boolean_t       previously_pmapped = m->pmapped;
+       boolean_t       previously_pmapped = m->vmp_pmapped;
        boolean_t       must_disconnect = 0;
        boolean_t       map_is_switched, map_is_switch_protected;
+       boolean_t       cs_violation;
        int             cs_enforcement_enabled;
        vm_prot_t       fault_type;
        vm_object_t     object;
-       
+       boolean_t       no_cache = fault_info->no_cache;
+       boolean_t       cs_bypass = fault_info->cs_bypass;
+       int             pmap_options = fault_info->pmap_options;
+
        fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
        object = VM_PAGE_OBJECT(m);
 
        vm_object_lock_assert_held(object);
+
+#if KASAN
+       if (pmap == kernel_pmap) {
+               kasan_notify_address(vaddr, PAGE_SIZE);
+       }
+#endif
+
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
 
        if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
-               assert(m->fictitious);
+               assert(m->vmp_fictitious);
                return KERN_SUCCESS;
        }
 
@@ -2566,7 +2504,12 @@ vm_fault_enter(vm_page_t m,
 
                vm_object_lock_assert_exclusive(object);
 
-       } else if ((fault_type & VM_PROT_WRITE) == 0 && !m->wpmapped) {
+       } else if ((fault_type & VM_PROT_WRITE) == 0 &&
+                  (!m->vmp_wpmapped
+#if VM_OBJECT_ACCESS_TRACKING
+                   || object->access_tracking
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+                          )) {
                /*
                 * This is not a "write" fault, so we
                 * might not have taken the object lock
@@ -2578,15 +2521,21 @@ vm_fault_enter(vm_page_t m,
                 * soft-fault again if we need write
                 * access later...
                 */
-               prot &= ~VM_PROT_WRITE;
+
+               /* This had better not be a JIT page. */
+               if (!pmap_has_prot_policy(prot)) {
+                       prot &= ~VM_PROT_WRITE;
+               } else {
+                       assert(cs_bypass);
+               }
        }
-       if (m->pmapped == FALSE) {
+       if (m->vmp_pmapped == FALSE) {
 
-               if (m->clustered) {
+               if (m->vmp_clustered) {
                        if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
                                /*
                                 * found it in the cache, but this
-                                * is the first fault-in of the page (m->pmapped == FALSE)
+                                * is the first fault-in of the page (m->vmp_pmapped == FALSE)
                                 * so it must have come in as part of
                                 * a cluster... account 1 pagein against it
                                 */
@@ -2594,7 +2543,7 @@ vm_fault_enter(vm_page_t m,
                                        *type_of_fault = DBG_PAGEIND_FAULT;
                                else
                                        *type_of_fault = DBG_PAGEINV_FAULT;
-                               
+
                                VM_PAGE_COUNT_AS_PAGEIN(m);
                        }
                        VM_PAGE_CONSUME_CLUSTERED(m);
@@ -2610,39 +2559,62 @@ vm_fault_enter(vm_page_t m,
        }
 
        /* Validate code signature if necessary. */
-       if (VM_FAULT_NEED_CS_VALIDATION(pmap, m, object)) {
+       if (!cs_bypass &&
+           VM_FAULT_NEED_CS_VALIDATION(pmap, m, object)) {
                vm_object_lock_assert_exclusive(object);
 
-               if (m->cs_validated) {
+               if (m->vmp_cs_validated) {
                        vm_cs_revalidates++;
                }
 
-               /* VM map is locked, so 1 ref will remain on VM object - 
+               /* VM map is locked, so 1 ref will remain on VM object -
                 * so no harm if vm_page_validate_cs drops the object lock */
+
+#if PMAP_CS
+               if (fault_info->pmap_cs_associated &&
+                   pmap_cs_enforced(pmap) &&
+                   !m->vmp_cs_validated &&
+                   !m->vmp_cs_tainted &&
+                   !m->vmp_cs_nx &&
+                   (prot & VM_PROT_EXECUTE) &&
+                   (caller_prot & VM_PROT_EXECUTE)) {
+                       /*
+                        * With pmap_cs, the pmap layer will validate the
+                        * code signature for any executable pmap mapping.
+                        * No need for us to validate this page too:
+                        * in pmap_cs we trust...
+                        */
+                       vm_cs_defer_to_pmap_cs++;
+               } else {
+                       vm_cs_defer_to_pmap_cs_not++;
+                       vm_page_validate_cs(m);
+               }
+#else /* PMAP_CS */
                vm_page_validate_cs(m);
+#endif /* PMAP_CS */
        }
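
A standalone sketch of the PMAP_CS deferral decision above, with hypothetical boolean parameters standing in for the fault-info, pmap and page state: when the pmap layer enforces code signing for an executable mapping of a page that is not yet validated, not tainted and not marked non-executable, validation is left to pmap_cs, otherwise vm_page_validate_cs() runs as before.

#include <stdbool.h>

/* Hypothetical predicate mirroring the PMAP_CS branch above. */
static bool
defer_cs_validation_to_pmap(bool pmap_cs_associated, bool pmap_cs_enforced,
                            bool page_validated, bool page_tainted, bool page_nx,
                            bool mapping_executable, bool caller_wants_execute)
{
        return pmap_cs_associated && pmap_cs_enforced &&
            !page_validated && !page_tainted && !page_nx &&
            mapping_executable && caller_wants_execute;
}
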
 
-#define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
-#define page_nx(m) ((m)->cs_nx)
+#define page_immutable(m,prot) ((m)->vmp_cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
+#define page_nx(m) ((m)->vmp_cs_nx)
 
        map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
                           (pmap == vm_map_pmap(current_thread()->map)));
        map_is_switch_protected = current_thread()->map->switch_protect;
-       
+
        /* If the map is switched, and is switch-protected, we must protect
-        * some pages from being write-faulted: immutable pages because by 
+        * some pages from being write-faulted: immutable pages because by
         * definition they may not be written, and executable pages because that
         * would provide a way to inject unsigned code.
         * If the page is immutable, we can simply return. However, we can't
         * immediately determine whether a page is executable anywhere. But,
         * we can disconnect it everywhere and remove the executable protection
-        * from the current map. We do that below right before we do the 
+        * from the current map. We do that below right before we do the
         * PMAP_ENTER.
         */
-       cs_enforcement_enabled = cs_enforcement(NULL);
+       cs_enforcement_enabled = cs_process_enforcement(NULL);
 
-       if(cs_enforcement_enabled && map_is_switched && 
-          map_is_switch_protected && page_immutable(m, prot) && 
+       if(cs_enforcement_enabled && map_is_switched &&
+          map_is_switch_protected && page_immutable(m, prot) &&
           (prot & VM_PROT_WRITE))
        {
                return KERN_CODESIGN_ERROR;
@@ -2654,59 +2626,62 @@ vm_fault_enter(vm_page_t m,
                return KERN_CODESIGN_ERROR;
        }
 
-       if (cs_enforcement_enabled &&
-           !m->cs_validated &&
-           (prot & VM_PROT_EXECUTE) &&
-           !(caller_prot & VM_PROT_EXECUTE)) {
-               /*
-                * FOURK PAGER:
-                * This page has not been validated and will not be
-                * allowed to be mapped for "execute".
-                * But the caller did not request "execute" access for this
-                * fault, so we should not raise a code-signing violation
-                * (and possibly kill the process) below.
-                * Instead, let's just remove the "execute" access request.
-                * 
-                * This can happen on devices with a 4K page size if a 16K
-                * page contains a mix of signed&executable and
-                * unsigned&non-executable 4K pages, making the whole 16K
-                * mapping "executable".
-                */
-               prot &= ~VM_PROT_EXECUTE;
-       }
-
        /* A page could be tainted, or pose a risk of being tainted later.
         * Check whether the receiving process wants it, and make it feel
         * the consequences (that happens in cs_invalid_page()).
-        * For CS Enforcement, two other conditions will 
-        * cause that page to be tainted as well: 
+        * For CS Enforcement, two other conditions will
+        * cause that page to be tainted as well:
         * - pmapping an unsigned page executable - this means unsigned code;
         * - writeable mapping of a validated page - the content of that page
         *   can be changed without the kernel noticing, therefore unsigned
         *   code can be created
         */
-       if (!cs_bypass &&
-           (m->cs_tainted ||
-            (cs_enforcement_enabled &&
-             (/* The page is unsigned and wants to be executable */
-              (!m->cs_validated && (prot & VM_PROT_EXECUTE))  ||
-              /* The page should be immutable, but is in danger of being modified
-               * This is the case where we want policy from the code directory -
-               * is the page immutable or not? For now we have to assume that 
-               * code pages will be immutable, data pages not.
-               * We'll assume a page is a code page if it has a code directory 
-               * and we fault for execution.
-               * That is good enough since if we faulted the code page for
-               * writing in another map before, it is wpmapped; if we fault
-               * it for writing in this map later it will also be faulted for executing 
-               * at the same time; and if we fault for writing in another map
-               * later, we will disconnect it from this pmap so we'll notice
-               * the change.
-               */
-             (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped))
-             ))
-                   )) 
-       {
+       if (cs_bypass) {
+               /* code-signing is bypassed */
+               cs_violation = FALSE;
+       } else if (m->vmp_cs_tainted) {
+               /* tainted page */
+               cs_violation = TRUE;
+       } else if (!cs_enforcement_enabled) {
+               /* no further code-signing enforcement */
+               cs_violation = FALSE;
+       } else if (page_immutable(m, prot) &&
+                  ((prot & VM_PROT_WRITE) ||
+                   m->vmp_wpmapped)) {
+               /*
+                * The page should be immutable, but is in danger of being
+                * modified.
+                * This is the case where we want policy from the code
+                * directory - is the page immutable or not? For now we have
+                * to assume that code pages will be immutable, data pages not.
+                * We'll assume a page is a code page if it has a code directory
+                * and we fault for execution.
+                * That is good enough since if we faulted the code page for
+                * writing in another map before, it is wpmapped; if we fault
+                * it for writing in this map later it will also be faulted for
+                * executing at the same time; and if we fault for writing in
+                * another map later, we will disconnect it from this pmap so
+                * we'll notice the change.
+                */
+               cs_violation = TRUE;
+       } else if (!m->vmp_cs_validated &&
+                  (prot & VM_PROT_EXECUTE)
+#if PMAP_CS
+                  /*
+                   * Executable pages will be validated by pmap_cs;
+                   * in pmap_cs we trust...
+                   * If pmap_cs is turned off, this is a code-signing
+                   * violation.
+                   */
+                  && ! (pmap_cs_enforced(pmap))
+#endif /* PMAP_CS */
+               ) {
+               cs_violation = TRUE;
+       } else {
+               cs_violation = FALSE;
+       }
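
The restructured check above replaces the old single compound condition with an explicit decision chain. A standalone sketch of that chain, ignoring the PMAP_CS special case and using hypothetical names for the per-page and per-fault state:

#include <stdbool.h>

/* Hypothetical classifier mirroring the cs_violation chain above. */
static bool
classify_cs_violation(bool cs_bypass, bool enforcement_enabled,
                      bool page_tainted, bool page_validated,
                      bool page_immutable, bool write_fault,
                      bool already_wpmapped, bool exec_requested)
{
        if (cs_bypass)
                return false;           /* code signing is bypassed for this mapping */
        if (page_tainted)
                return true;            /* page already known to be invalid */
        if (!enforcement_enabled)
                return false;           /* nothing further to enforce */
        if (page_immutable && (write_fault || already_wpmapped))
                return true;            /* validated page in danger of modification */
        if (!page_validated && exec_requested)
                return true;            /* unsigned page would become executable */
        return false;
}
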
+
+       if (cs_violation) {
                /* We will have a tainted page. Have to handle the special case
                 * of a switched map now. If the map is not switched, standard
                 * procedure applies - call cs_invalid_page().
@@ -2715,22 +2690,21 @@ vm_fault_enter(vm_page_t m,
                 * it will not be executing from the map. So we don't call
                 * cs_invalid_page() in that case. */
                boolean_t reject_page, cs_killed;
-               if(map_is_switched) { 
+               if(map_is_switched) {
                        assert(pmap==vm_map_pmap(current_thread()->map));
                        assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
                        reject_page = FALSE;
                } else {
                        if (cs_debug > 5)
-                               printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n", 
+                               printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
                                       object->code_signed ? "yes" : "no",
-                                      m->cs_validated ? "yes" : "no",
-                                      m->cs_tainted ? "yes" : "no",
-                                      m->wpmapped ? "yes" : "no",
-                                      m->slid ? "yes" : "no",
+                                      m->vmp_cs_validated ? "yes" : "no",
+                                      m->vmp_cs_tainted ? "yes" : "no",
+                                      m->vmp_wpmapped ? "yes" : "no",
                                       (int)prot);
                        reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
                }
-               
+
                if (reject_page) {
                        /* reject the invalid page: abort the page fault */
                        int                     pid;
@@ -2758,7 +2732,7 @@ vm_fault_enter(vm_page_t m,
 
                        /* get file's VM object */
                        file_object = object;
-                       file_offset = m->offset;
+                       file_offset = m->vmp_offset;
                        for (shadow = file_object->shadow,
                                     shadow_depth = 0;
                             shadow != VM_OBJECT_NULL;
@@ -2812,7 +2786,7 @@ vm_fault_enter(vm_page_t m,
                               "from offset 0x%llx in file \"%s%s%s\" "
                               "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
                               "(signed:%d validated:%d tainted:%d nx:%d "
-                              "wpmapped:%d slid:%d dirty:%d depth:%d)\n",
+                              "wpmapped:%d dirty:%d depth:%d)\n",
                               pid, procname, (addr64_t) vaddr,
                               file_offset,
                               (pathname ? pathname : "<nil>"),
@@ -2825,12 +2799,11 @@ vm_fault_enter(vm_page_t m,
                                : "!="),
                               mtime.tv_sec, mtime.tv_nsec,
                               object->code_signed,
-                              m->cs_validated,
-                              m->cs_tainted,
-                              m->cs_nx,
-                              m->wpmapped,
-                              m->slid,
-                              m->dirty,
+                              m->vmp_cs_validated,
+                              m->vmp_cs_tainted,
+                              m->vmp_cs_nx,
+                              m->vmp_wpmapped,
+                              m->vmp_dirty,
                               shadow_depth);
 
                        /*
@@ -2875,12 +2848,12 @@ vm_fault_enter(vm_page_t m,
                                                        ceri->ceri_page_modtime_secs = mtime.tv_sec;
                                                        ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
                                                        ceri->ceri_object_codesigned = (object->code_signed);
-                                                       ceri->ceri_page_codesig_validated = (m->cs_validated);
-                                                       ceri->ceri_page_codesig_tainted = (m->cs_tainted);
-                                                       ceri->ceri_page_codesig_nx = (m->cs_nx);
-                                                       ceri->ceri_page_wpmapped = (m->wpmapped);
-                                                       ceri->ceri_page_slid = (m->slid);
-                                                       ceri->ceri_page_dirty = (m->dirty);
+                                                       ceri->ceri_page_codesig_validated = (m->vmp_cs_validated);
+                                                       ceri->ceri_page_codesig_tainted = (m->vmp_cs_tainted);
+                                                       ceri->ceri_page_codesig_nx = (m->vmp_cs_nx);
+                                                       ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
+                                                       ceri->ceri_page_slid = 0;
+                                                       ceri->ceri_page_dirty = (m->vmp_dirty);
                                                        ceri->ceri_page_shadow_depth = shadow_depth;
                                                } else {
 #if DEBUG || DEVELOPMENT
@@ -2897,13 +2870,13 @@ vm_fault_enter(vm_page_t m,
                                set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
                        }
                        if (panic_on_cs_killed &&
-                           object->object_slid) {
+                           object->object_is_shared_cache) {
                                panic("CODE SIGNING: process %d[%s]: "
                                      "rejecting invalid page at address 0x%llx "
                                      "from offset 0x%llx in file \"%s%s%s\" "
                                      "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
                                      "(signed:%d validated:%d tainted:%d nx:%d"
-                                     "wpmapped:%d slid:%d dirty:%d depth:%d)\n",
+                                     "wpmapped:%d dirty:%d depth:%d)\n",
                                      pid, procname, (addr64_t) vaddr,
                                      file_offset,
                                      (pathname ? pathname : "<nil>"),
@@ -2916,12 +2889,11 @@ vm_fault_enter(vm_page_t m,
                                       : "!="),
                                      mtime.tv_sec, mtime.tv_nsec,
                                      object->code_signed,
-                                     m->cs_validated,
-                                     m->cs_tainted,
-                                     m->cs_nx,
-                                     m->wpmapped,
-                                     m->slid,
-                                     m->dirty,
+                                     m->vmp_cs_validated,
+                                     m->vmp_cs_tainted,
+                                     m->vmp_cs_nx,
+                                     m->vmp_wpmapped,
+                                     m->vmp_dirty,
                                      shadow_depth);
                        }
 
@@ -2936,7 +2908,7 @@ vm_fault_enter(vm_page_t m,
                } else {
                        /* proceed with the invalid page */
                        kr = KERN_SUCCESS;
-                       if (!m->cs_validated &&
+                       if (!m->vmp_cs_validated &&
                            !object->code_signed) {
                                /*
                                 * This page has not been (fully) validated but
@@ -2965,8 +2937,8 @@ vm_fault_enter(vm_page_t m,
                                 * through that code path for re-consideration
                                 * of the validity of that page.
                                 */
-                               must_disconnect = !m->cs_tainted;
-                               m->cs_tainted = TRUE;
+                               must_disconnect = !m->vmp_cs_tainted;
+                               m->vmp_cs_tainted = TRUE;
                        }
                        cs_enter_tainted_accepted++;
                }
@@ -2982,7 +2954,7 @@ vm_fault_enter(vm_page_t m,
                        }
 #endif
                }
-               
+
        } else {
                /* proceed with the valid page */
                kr = KERN_SUCCESS;
@@ -3009,12 +2981,12 @@ MACRO_END
         * the page queues.  Change wiring
         * case is obvious.
         */
-       assert((m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
+       assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
 
 #if CONFIG_BACKGROUND_QUEUE
        vm_page_update_background_state(m);
 #endif
-       if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+       if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
                /*
                 * Compressor pages are neither wired
                 * nor pageable and should never change.
@@ -3025,7 +2997,7 @@ MACRO_END
 
                if (wired) {
                        if (kr == KERN_SUCCESS) {
-                               vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE);
+                               vm_page_wire(m, wire_tag, TRUE);
                        }
                } else {
                        vm_page_unwire(m, TRUE);
@@ -3044,19 +3016,19 @@ MACRO_END
                        __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
                        vm_page_deactivate(m);
                        /* we keep the page queues lock, if we need it later */
-               } else if (((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
-                           (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
-                           (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
-                           ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
+               } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
+                           (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
+                           (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
+                           ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
                           !VM_PAGE_WIRED(m)) {
 
-                       if (vm_page_local_q && 
+                       if (vm_page_local_q &&
                            (*type_of_fault == DBG_COW_FAULT ||
                             *type_of_fault == DBG_ZERO_FILL_FAULT) ) {
                                struct vpl      *lq;
                                uint32_t        lid;
 
-                               assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+                               assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
 
                                __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
                                vm_object_lock_assert_exclusive(object);
@@ -3083,11 +3055,11 @@ MACRO_END
 
                                vm_page_check_pageable_safe(m);
                                vm_page_queue_enter(&lq->vpl_queue, m,
-                                                   vm_page_t, pageq);
-                               m->vm_page_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
-                               m->local_id = lid;
+                                                   vm_page_t, vmp_pageq);
+                               m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
+                               m->vmp_local_id = lid;
                                lq->vpl_count++;
-                                       
+
                                if (object->internal)
                                        lq->vpl_internal_count++;
                                else
@@ -3124,11 +3096,11 @@ MACRO_END
                                 * page queue lock
                                 */
                                if (!VM_PAGE_WIRED(m)) {
-                                       if (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+                                       if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
                                                vm_page_queues_remove(m, FALSE);
 
-                                               vm_pageout_cleaned_reactivated++;
-                                               vm_pageout_cleaned_fault_reactivated++;
+                                               VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
+                                               VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
                                        }
 
                                        if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
@@ -3143,15 +3115,15 @@ MACRO_END
                                                 * that they can be readily
                                                 * recycled if free memory runs
                                                 * low.  Otherwise the page is
-                                                * activated as normal. 
+                                                * activated as normal.
                                                 */
 
                                                if (no_cache &&
                                                    (!previously_pmapped ||
-                                                    m->no_cache)) {
-                                                       m->no_cache = TRUE;
+                                                    m->vmp_no_cache)) {
+                                                       m->vmp_no_cache = TRUE;
 
-                                                       if (m->vm_page_q_state != VM_PAGE_ON_SPECULATIVE_Q) 
+                                                       if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)
                                                                vm_page_speculate(m, FALSE);
 
                                                } else if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
@@ -3176,11 +3148,11 @@ MACRO_END
        if (kr == KERN_SUCCESS) {
                /*
                 * NOTE: we may only hold the vm_object lock SHARED
-                * at this point, so we need the phys_page lock to 
+                * at this point, so we need the phys_page lock to
                 * properly serialize updating the pmapped and
                 * xpmapped bits
                 */
-               if ((prot & VM_PROT_EXECUTE) && !m->xpmapped) {
+               if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
                        ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
 
                        pmap_lock_phys_page(phys_page);
@@ -3190,17 +3162,20 @@ MACRO_END
                         * need to grab this lock a 2nd time
                         * just below
                         */
-                       m->pmapped = TRUE;
-                       
-                       if (!m->xpmapped) {
+                       m->vmp_pmapped = TRUE;
 
-                               m->xpmapped = TRUE;
+                       if (!m->vmp_xpmapped) {
+
+                               m->vmp_xpmapped = TRUE;
 
                                pmap_unlock_phys_page(phys_page);
 
                                if (!object->internal)
                                        OSAddAtomic(1, &vm_page_xpmapped_external_count);
 
+#if defined(__arm__) || defined(__arm64__)
+                               pmap_sync_page_data_phys(phys_page);
+#else
                                if (object->internal &&
                                    object->pager != NULL) {
                                        /*
@@ -3217,71 +3192,79 @@ MACRO_END
                                        assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
                                        pmap_sync_page_data_phys(phys_page);
                                }
+#endif
                        } else
                                pmap_unlock_phys_page(phys_page);
                } else {
-                       if (m->pmapped == FALSE) {
+                       if (m->vmp_pmapped == FALSE) {
                                ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
 
                                pmap_lock_phys_page(phys_page);
-                               m->pmapped = TRUE;
+                               m->vmp_pmapped = TRUE;
                                pmap_unlock_phys_page(phys_page);
                        }
                }
-               if (vm_page_is_slideable(m)) {
-                       boolean_t was_busy = m->busy;
-
-                       vm_object_lock_assert_exclusive(object);
-
-                       m->busy = TRUE;
-                       kr = vm_page_slide(m, 0);
-                       assert(m->busy);
-                       if(!was_busy) {
-                               PAGE_WAKEUP_DONE(m);
-                       }
-                       if (kr != KERN_SUCCESS) {
-                               /*
-                                * This page has not been slid correctly,
-                                * do not do the pmap_enter() !
-                                * Let vm_fault_enter() return the error
-                                * so the caller can fail the fault.
-                                */
-                               goto after_the_pmap_enter;
-                       }
-               }
 
                if (fault_type & VM_PROT_WRITE) {
 
-                       if (m->wpmapped == FALSE) {
+                       if (m->vmp_wpmapped == FALSE) {
                                vm_object_lock_assert_exclusive(object);
                                if (!object->internal && object->pager) {
                                        task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
                                }
-                               m->wpmapped = TRUE;
+                               m->vmp_wpmapped = TRUE;
                        }
                        if (must_disconnect) {
                                /*
-                                * We can only get here 
+                                * We can only get here
                                 * because of the CSE logic
                                 */
                                assert(cs_enforcement_enabled);
                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
-                               /* 
+                               /*
                                 * If we are faulting for a write, we can clear
                                 * the execute bit - that will ensure the page is
                                 * checked again before being executable, which
                                 * protects against a map switch.
                                 * This only happens the first time the page
-                                * gets tainted, so we won't get stuck here 
+                                * gets tainted, so we won't get stuck here
                                 * to make an already writeable page executable.
                                 */
                                if (!cs_bypass){
+                                       assert(!pmap_has_prot_policy(prot));
                                        prot &= ~VM_PROT_EXECUTE;
                                }
                        }
                }
                assert(VM_PAGE_OBJECT(m) == object);
 
+#if VM_OBJECT_ACCESS_TRACKING
+               if (object->access_tracking) {
+                       DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
+                       if (fault_type & VM_PROT_WRITE) {
+                               object->access_tracking_writes++;
+                               vm_object_access_tracking_writes++;
+                       } else {
+                               object->access_tracking_reads++;
+                               vm_object_access_tracking_reads++;
+                       }
+               }
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+
+#if PMAP_CS
+               /*
+                * If CS enforcement is on, we don't ask for an executable page if the
+                * fault does not call for execution, because that can fail in
+                * situations where the caller only actually wanted read access.
+                * However, it may be better to instead retry without execute on
+                * failure, or pass a flag into pmap_enter to do the right thing.
+                */
+               // TODO: <rdar://problem/30997388> maybe do something better than masking out VM_PROT_EXECUTE on non-execute faults
+               if (pmap_cs_enforced(pmap) && !(caller_prot & VM_PROT_EXECUTE)) {
+                       prot &= ~VM_PROT_EXECUTE;
+               }
+#endif
+
                /* Prevent a deadlock by not
                 * holding the object lock if we need to wait for a page in
                 * pmap_enter() - <rdar://problem/7138958> */
@@ -3289,6 +3272,19 @@ MACRO_END
                                   wired,
                                   pmap_options | PMAP_OPTIONS_NOWAIT,
                                   pe_result);
+#if __x86_64__
+               if (pe_result == KERN_INVALID_ARGUMENT &&
+                   pmap == PMAP_NULL &&
+                   wired) {
+                       /*
+                        * Wiring a page in a pmap-less VM map:
+                        * VMware's "vmmon" kernel extension does this
+                        * to grab pages.
+                        * Let it proceed even though the PMAP_ENTER() failed.
+                        */
+                       pe_result = KERN_SUCCESS;
+               }
+#endif /* __x86_64__ */
 
                if(pe_result == KERN_RESOURCE_SHORTAGE) {
 
@@ -3298,10 +3294,10 @@ MACRO_END
                                 * on the top-object in this chain... we can't just drop
                                 * the lock on the object we're inserting the page into
                                 * and recall the PMAP_ENTER since we can still cause
-                                * a deadlock if one of the critical paths tries to 
+                                * a deadlock if one of the critical paths tries to
                                 * acquire the lock on the top-object and we're blocked
                                 * in PMAP_ENTER waiting for memory... our only recourse
-                                * is to deal with it at a higher level where we can 
+                                * is to deal with it at a higher level where we can
                                 * drop both locks.
                                 */
                                *need_retry = TRUE;
@@ -3310,33 +3306,35 @@ MACRO_END
                        }
                        /* The nonblocking version of pmap_enter did not succeed.
                         * and we don't need to drop other locks and retry
-                        * at the level above us, so 
+                        * at the level above us, so
                         * use the blocking version instead. Requires marking
                         * the page busy and unlocking the object */
-                       boolean_t was_busy = m->busy;
+                       boolean_t was_busy = m->vmp_busy;
 
                        vm_object_lock_assert_exclusive(object);
 
-                       m->busy = TRUE;
+                       m->vmp_busy = TRUE;
                        vm_object_unlock(object);
-                       
+
                        PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type,
                                           0, wired,
                                           pmap_options, pe_result);
-                               
+
                        assert(VM_PAGE_OBJECT(m) == object);
 
                        /* Take the object lock again. */
                        vm_object_lock(object);
-                       
+
                        /* If the page was busy, someone else will wake it up.
                         * Otherwise, we have to do it now. */
-                       assert(m->busy);
+                       assert(m->vmp_busy);
                        if(!was_busy) {
                                PAGE_WAKEUP_DONE(m);
                        }
                        vm_pmap_enter_blocked++;
                }
+
+               kr = pe_result;
        }
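
The PMAP_ENTER fallback above follows a common pattern: attempt the non-blocking variant while holding the object lock; if it fails for lack of resources and the caller cannot retry at a higher level, mark the page busy, drop the object lock, repeat the call in blocking mode, then retake the lock and wake the page. A schematic sketch with entirely hypothetical types and routines (not the xnu APIs):

#include <stdbool.h>

/* Hypothetical stand-ins for the page, object and mapping primitives. */
typedef struct page   { bool busy; }   page_t;
typedef struct object { bool locked; } object_t;

static bool try_enter_nowait(page_t *p) { (void)p; return false; } /* pretend: resource shortage */
static void enter_blocking(page_t *p)   { (void)p; }               /* pretend: may sleep, then succeeds */
static void obj_lock(object_t *o)       { o->locked = true;  }
static void obj_unlock(object_t *o)     { o->locked = false; }
static void page_wakeup_done(page_t *p) { p->busy = false;   }

static void
enter_with_fallback(object_t *obj, page_t *p)
{
        if (try_enter_nowait(p))
                return;                 /* fast path: entered without sleeping */

        bool was_busy = p->busy;
        p->busy = true;                 /* keep the page stable while the lock is dropped */
        obj_unlock(obj);                /* never sleep for memory while holding the object lock */

        enter_blocking(p);

        obj_lock(obj);
        if (!was_busy)
                page_wakeup_done(p);    /* we set busy above, so clear it and wake any waiters */
}
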
 
 after_the_pmap_enter:
@@ -3348,13 +3346,14 @@ vm_pre_fault(vm_map_offset_t vaddr)
 {
        if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
 
-               vm_fault(current_map(), /* map */
-                       vaddr,          /* vaddr */
-                       VM_PROT_READ, /* fault_type */
-                       FALSE, /* change_wiring */
-                       THREAD_UNINT, /* interruptible */
-                       NULL, /* caller_pmap */
-                       0 /* caller_pmap_addr */);
+               vm_fault(current_map(),      /* map */
+                       vaddr,               /* vaddr */
+                       VM_PROT_READ,        /* fault_type */
+                       FALSE,               /* change_wiring */
+                       VM_KERN_MEMORY_NONE, /* tag - not wiring */
+                       THREAD_UNINT,        /* interruptible */
+                       NULL,                /* caller_pmap */
+                       0                    /* caller_pmap_addr */);
        }
 }
 
@@ -3381,7 +3380,7 @@ unsigned long vm_fault_collapse_skipped = 0;
 
 
 kern_return_t
-vm_fault(
+vm_fault_external(
        vm_map_t        map,
        vm_map_offset_t vaddr,
        vm_prot_t       fault_type,
@@ -3390,11 +3389,26 @@ vm_fault(
        pmap_t          caller_pmap,
        vm_map_offset_t caller_pmap_addr)
 {
-       return vm_fault_internal(map, vaddr, fault_type, change_wiring,
+       return vm_fault_internal(map, vaddr, fault_type, change_wiring, vm_tag_bt(),
                                 interruptible, caller_pmap, caller_pmap_addr,
                                 NULL);
 }
 
+kern_return_t
+vm_fault(
+       vm_map_t        map,
+       vm_map_offset_t vaddr,
+       vm_prot_t       fault_type,
+       boolean_t       change_wiring,
+       vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
+       int             interruptible,
+       pmap_t          caller_pmap,
+       vm_map_offset_t caller_pmap_addr)
+{
+       return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag,
+                                interruptible, caller_pmap, caller_pmap_addr,
+                                NULL);
+}
 
 kern_return_t
 vm_fault_internal(
@@ -3402,6 +3416,7 @@ vm_fault_internal(
        vm_map_offset_t vaddr,
        vm_prot_t       caller_prot,
        boolean_t       change_wiring,
+       vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
        int             interruptible,
        pmap_t          caller_pmap,
        vm_map_offset_t caller_pmap_addr,
@@ -3426,18 +3441,22 @@ vm_fault_internal(
        vm_object_t             new_object;
        int                     type_of_fault;
        pmap_t                  pmap;
-       boolean_t               interruptible_state;
+       wait_interrupt_t        interruptible_state;
        vm_map_t                real_map = map;
        vm_map_t                original_map = map;
+       boolean_t               object_locks_dropped = FALSE;
        vm_prot_t               fault_type;
        vm_prot_t               original_fault_type;
-       struct vm_object_fault_info fault_info;
+       struct vm_object_fault_info fault_info = {};
        boolean_t               need_collapse = FALSE;
        boolean_t               need_retry = FALSE;
        boolean_t               *need_retry_ptr = NULL;
        int                     object_lock_type = 0;
        int                     cur_object_lock_type;
        vm_object_t             top_object = VM_OBJECT_NULL;
+       vm_object_t             written_on_object = VM_OBJECT_NULL;
+       memory_object_t         written_on_pager = NULL;
+       vm_object_offset_t      written_on_offset = 0;
        int                     throttle_delay;
        int                     compressed_count_delta;
        int                     grab_options;
@@ -3452,13 +3471,13 @@ vm_fault_internal(
        vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
 
        if (map == kernel_map) {
-               trace_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(vaddr);
-               trace_real_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(trace_real_vaddr);
+               trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
+               trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
        } else {
                trace_vaddr = vaddr;
        }
 
-       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                      (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
                              ((uint64_t)trace_vaddr >> 32),
                              trace_vaddr,
@@ -3467,7 +3486,7 @@ vm_fault_internal(
                              0);
 
        if (get_preemption_level() != 0) {
-               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                      (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
                                      ((uint64_t)trace_vaddr >> 32),
                                      trace_vaddr,
@@ -3477,7 +3496,15 @@ vm_fault_internal(
 
                return (KERN_FAILURE);
        }
-       
+
+       thread_t cthread = current_thread();
+       boolean_t rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
+       uint64_t fstart = 0;
+
+       if (rtfault) {
+               fstart = mach_continuous_time();
+       }
+
        interruptible_state = thread_interrupt_level(interruptible);
 
        fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);
@@ -3493,7 +3520,17 @@ vm_fault_internal(
 
        cur_object_lock_type = OBJECT_LOCK_SHARED;
 
+       if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
+               if (compressor_map) {
+                       if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
+                               panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
+
+                       }
+               }
+       }
 RetryFault:
+       assert(written_on_object == VM_OBJECT_NULL);
+
        /*
         * assume we will hit a page in the cache
         * otherwise, explicitly override with
@@ -3515,7 +3552,6 @@ RetryFault:
                                  &fault_info,
                                  &real_map);
 
-
        if (kr != KERN_SUCCESS) {
                vm_map_unlock_read(map);
                goto done;
@@ -3589,6 +3625,24 @@ RetryFault:
         *
         */
 
+#if defined(__arm64__)
+       /*
+        * Fail if reading an execute-only page in a
+        * pmap that enforces execute-only protection.
+        */
+       if (fault_type == VM_PROT_READ &&
+               (prot & VM_PROT_EXECUTE) &&
+               !(prot & VM_PROT_READ) &&
+               pmap_enforces_execute_only(pmap)) {
+                       vm_object_unlock(object);
+                       vm_map_unlock_read(map);
+                       if (real_map != map) {
+                               vm_map_unlock(real_map);
+                       }
+                       kr = KERN_PROTECTION_FAILURE;
+                       goto done;
+       }
+#endif
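
A standalone sketch of the execute-only check above, with hypothetical names: a plain read fault on a mapping whose protection is execute-only (no read permission) is refused outright when the pmap enforces execute-only permissions, rather than silently granting read access.

#include <stdbool.h>

typedef unsigned int vm_prot_t;
#define VM_PROT_READ    0x1             /* values as in mach/vm_prot.h */
#define VM_PROT_EXECUTE 0x4

/* Hypothetical predicate mirroring the check above. */
static bool
xo_read_fault_denied(vm_prot_t fault_type, vm_prot_t prot, bool pmap_enforces_xo)
{
        return fault_type == VM_PROT_READ &&
            (prot & VM_PROT_EXECUTE) &&
            !(prot & VM_PROT_READ) &&
            pmap_enforces_xo;
}
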
 
        /*
         * If this page is to be inserted in a copy delay object
@@ -3628,7 +3682,7 @@ RetryFault:
                if (m != VM_PAGE_NULL) {
                        m_object = cur_object;
 
-                       if (m->busy) {
+                       if (m->vmp_busy) {
                                wait_result_t   result;
 
                                /*
@@ -3677,9 +3731,9 @@ RetryFault:
                                                continue;
                                        }
                                }
-                               if ((m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
+                               if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
                                        /*
-                                        * m->busy == TRUE and the object is locked exclusively
+                                        * m->vmp_busy == TRUE and the object is locked exclusively
                                         * if m->pageout_queue == TRUE after we acquire the
                                         * queues lock, we are guaranteed that it is stable on
                                         * the pageout queue and therefore reclaimable
@@ -3691,7 +3745,7 @@ RetryFault:
 
                                        vm_page_lock_queues();
 
-                                       if (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
+                                       if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
                                                vm_pageout_throttle_up(m);
                                                vm_page_unlock_queues();
 
@@ -3723,7 +3777,7 @@ RetryFault:
                                goto done;
                        }
 reclaimed_from_pageout:
-                       if (m->laundry) {
+                       if (m->vmp_laundry) {
                                if (object != cur_object) {
                                        if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
                                                cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
@@ -3766,7 +3820,7 @@ reclaimed_from_pageout:
                                 */
                                break;
                        }
-                       if (m->unusual && (m->error || m->restart || m->private || m->absent)) {
+                       if (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent)) {
                                /*
                                 * Unusual case... let the slow path deal with it
                                 */
@@ -3782,42 +3836,30 @@ reclaimed_from_pageout:
                                kr = KERN_MEMORY_ERROR;
                                goto done;
                        }
+                       assert(m_object == VM_PAGE_OBJECT(m));
 
-                       if (m->encrypted) {
+                       if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m, m_object) ||
+                           (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
+upgrade_for_validation:
                                /*
-                                * ENCRYPTED SWAP:
-                                * We've soft-faulted (because it's not in the page
-                                * table) on an encrypted page.
-                                * Keep the page "busy" so that no one messes with
-                                * it during the decryption.
-                                * Release the extra locks we're holding, keep only
-                                * the page's VM object lock.
-                                *
-                                * in order to set 'busy' on 'm', we must
-                                * have object that 'm' belongs to locked exclusively
+                                * We might need to validate this page
+                                * against its code signature, so we
+                                * want to hold the VM object exclusively.
                                 */
                                if (object != cur_object) {
-                                       vm_object_unlock(object);
-
                                        if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+                                               vm_object_unlock(object);
+                                               vm_object_unlock(cur_object);
 
                                                cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
 
-                                               if (vm_object_lock_upgrade(cur_object) == FALSE) {
-                                                       /*
-                                                        * couldn't upgrade so go do a full retry
-                                                        * immediately since we've already dropped
-                                                        * the top object lock associated with this page
-                                                        * and the current one got dropped due to the
-                                                        * failed upgrade... the state is no longer valid
-                                                        */
-                                                       vm_map_unlock_read(map);
-                                                       if (real_map != map)
-                                                               vm_map_unlock(real_map);
+                                               vm_map_unlock_read(map);
+                                               if (real_map != map)
+                                                       vm_map_unlock(real_map);
 
-                                                       goto RetryFault;
-                                               }
+                                               goto RetryFault;
                                        }
+
                                } else if (object_lock_type == OBJECT_LOCK_SHARED) {
 
                                        object_lock_type = OBJECT_LOCK_EXCLUSIVE;
@@ -3837,111 +3879,34 @@ reclaimed_from_pageout:
                                                continue;
                                        }
                                }
-                               m->busy = TRUE;
+                       }
+                       /*
+                        *      Two cases of map in faults:
+                        *          - At top level w/o copy object.
+                        *          - Read fault anywhere.
+                        *              --> must disallow write.
+                        */
 
-                               vm_map_unlock_read(map);
-                               if (real_map != map) 
-                                       vm_map_unlock(real_map);
+                       if (object == cur_object && object->copy == VM_OBJECT_NULL) {
 
-                               vm_page_decrypt(m, 0);
-
-                               assert(m->busy);
-                               PAGE_WAKEUP_DONE(m);
-
-                               vm_object_unlock(cur_object);
-                               /*
-                                * Retry from the top, in case anything
-                                * changed while we were decrypting...
-                                */
-                               goto RetryFault;
-                       }
-                       ASSERT_PAGE_DECRYPTED(m);
-
-                       if(vm_page_is_slideable(m)) {
-                               /*
-                                * We might need to slide this page, and so,
-                                * we want to hold the VM object exclusively.
-                                */
-                               if (object != cur_object) {
-                                       if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
-                                               vm_object_unlock(object);
-                                               vm_object_unlock(cur_object);
-
-                                               cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
-
-                                               vm_map_unlock_read(map);
-                                               if (real_map != map)
-                                                       vm_map_unlock(real_map);
-
-                                               goto RetryFault;
-                                       }
-                               } else if (object_lock_type == OBJECT_LOCK_SHARED) {
-
-                                       vm_object_unlock(object);
-                                       object_lock_type = OBJECT_LOCK_EXCLUSIVE;
-                                       vm_map_unlock_read(map);
-                                       goto RetryFault;
-                               }
-                       }
-                       assert(m_object == VM_PAGE_OBJECT(m));
-
-                       if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m, m_object) ||
-                           (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
-upgrade_for_validation:
-                               /*
-                                * We might need to validate this page
-                                * against its code signature, so we
-                                * want to hold the VM object exclusively.
-                                */
-                               if (object != cur_object) {
-                                       if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
-                                               vm_object_unlock(object);
-                                               vm_object_unlock(cur_object);
-
-                                               cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
-
-                                               vm_map_unlock_read(map);
-                                               if (real_map != map)
-                                                       vm_map_unlock(real_map);
-
-                                               goto RetryFault;
-                                       }
-
-                               } else if (object_lock_type == OBJECT_LOCK_SHARED) {
-
-                                       object_lock_type = OBJECT_LOCK_EXCLUSIVE;
-
-                                       if (vm_object_lock_upgrade(object) == FALSE) {
-                                               /*
-                                                * couldn't upgrade, so explictly take the lock
-                                                * exclusively and go relookup the page since we
-                                                * will have dropped the object lock and
-                                                * a different thread could have inserted
-                                                * a page at this offset
-                                                * no need for a full retry since we're
-                                                * at the top level of the object chain
-                                                */
-                                               vm_object_lock(object);
-
-                                               continue;
-                                       }
-                               }
-                       }
-                       /*
-                        *      Two cases of map in faults:
-                        *          - At top level w/o copy object.
-                        *          - Read fault anywhere.
-                        *              --> must disallow write.
-                        */
-
-                       if (object == cur_object && object->copy == VM_OBJECT_NULL) {
-
-                               goto FastPmapEnter;
-                       }
+                               goto FastPmapEnter;
+                       }
 
                        if ((fault_type & VM_PROT_WRITE) == 0) {
-
-                               prot &= ~VM_PROT_WRITE;
+                               if (!pmap_has_prot_policy(prot)) {
+                                       prot &= ~VM_PROT_WRITE;
+                               } else {
+                                       /*
+                                        * For a protection that the pmap cares
+                                        * about, we must hand over the full
+                                        * set of protections (so that the pmap
+                                        * layer can apply any desired policy).
+                                        * This means that cs_bypass must be
+                                        * set, as this can force us to pass
+                                        * RWX.
+                                        */
+                                       assert(fault_info.cs_bypass);
+                               }
 
                                if (object != cur_object) {
                                        /*
@@ -3977,7 +3942,7 @@ FastPmapEnter:
                                 * prepare for the pmap_enter...
                                 * object and map are both locked
                                 * m contains valid data
-                                * object == m->object
+                                * object == m->vmp_object
                                 * cur_object == NULL or it's been unlocked
                                 * no paging references on either object or cur_object
                                 */
@@ -3994,10 +3959,8 @@ FastPmapEnter:
                                                            caller_prot,
                                                            wired,
                                                            change_wiring,
-                                                           fault_info.no_cache,
-                                                           fault_info.cs_bypass,
-                                                           fault_info.user_tag,
-                                                           fault_info.pmap_options,
+                                                           wire_tag,
+                                                           &fault_info,
                                                            need_retry_ptr,
                                                            &type_of_fault);
                                } else {
@@ -4008,10 +3971,8 @@ FastPmapEnter:
                                                            caller_prot,
                                                            wired,
                                                            change_wiring,
-                                                           fault_info.no_cache,
-                                                           fault_info.cs_bypass,
-                                                           fault_info.user_tag,
-                                                           fault_info.pmap_options,
+                                                           wire_tag,
+                                                           &fault_info,
                                                            need_retry_ptr,
                                                            &type_of_fault);
                                }
@@ -4021,14 +3982,14 @@ FastPmapEnter:
 
                                if (m_object->internal)
                                        event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
-                               else if (m_object->object_slid)
+                               else if (m_object->object_is_shared_cache)
                                        event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
                                else
                                        event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
 
-                               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
+                               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
 
-                               DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
+                               DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
                                }
 #endif
                                if (kr == KERN_SUCCESS &&
@@ -4037,7 +3998,7 @@ FastPmapEnter:
                                        *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
                                        if (prot & VM_PROT_WRITE) {
                                                vm_object_lock_assert_exclusive(m_object);
-                                               m->dirty = TRUE;
+                                               m->vmp_dirty = TRUE;
                                        }
                                }
 
@@ -4057,7 +4018,7 @@ FastPmapEnter:
 
                                if (need_collapse == TRUE)
                                        vm_object_collapse(object, offset, TRUE);
-                               
+
                                if (need_retry == FALSE &&
                                    (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
                                        /*
@@ -4065,16 +4026,25 @@ FastPmapEnter:
                                         * vm_fault_deactivate_behind depends on the
                                         * state being up to date
                                         */
-                                       vm_fault_is_sequential(object, cur_offset, fault_info.behavior);
+                                       vm_fault_is_sequential(m_object, cur_offset, fault_info.behavior);
 
-                                       vm_fault_deactivate_behind(object, cur_offset, fault_info.behavior);
+                                       vm_fault_deactivate_behind(m_object, cur_offset, fault_info.behavior);
                                }
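/*
 * A deliberately simplified, user-space sketch of the sequential-access
 * detection performed by vm_fault_is_sequential() above.  The structure and
 * threshold here are hypothetical (the kernel keeps the equivalent state in
 * the VM object); the point is only the shape of the heuristic: contiguous
 * forward faults grow a run, anything else resets it, and a long enough run
 * is what makes deactivate-behind worthwhile.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096ull

struct access_state {
        uint64_t last_offset;
        uint64_t run_bytes;        /* length of the current forward run */
};

static bool
update_access_pattern(struct access_state *st, uint64_t offset)
{
        if (offset == st->last_offset + SKETCH_PAGE_SIZE) {
                st->run_bytes += SKETCH_PAGE_SIZE;        /* sequential forward */
        } else {
                st->run_bytes = 0;                        /* pattern broken */
        }
        st->last_offset = offset;

        /* arbitrary threshold: call it sequential after 8 contiguous pages */
        return st->run_bytes >= 8 * SKETCH_PAGE_SIZE;
}

int
main(void)
{
        struct access_state st = { 0, 0 };

        for (uint64_t off = SKETCH_PAGE_SIZE; off <= 16 * SKETCH_PAGE_SIZE;
            off += SKETCH_PAGE_SIZE) {
                if (update_access_pattern(&st, off)) {
                        printf("sequential at offset 0x%llx\n",
                            (unsigned long long)off);
                }
        }
        return 0;
}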
                                /*
                                 * That's it, clean up and return.
                                 */
-                               if (m->busy)
+                               if (m->vmp_busy)
                                        PAGE_WAKEUP_DONE(m);
 
+                               if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
+
+                                       vm_object_paging_begin(m_object);
+
+                                       assert(written_on_object == VM_OBJECT_NULL);
+                                       written_on_object = m_object;
+                                       written_on_pager = m_object->pager;
+                                       written_on_offset = m_object->paging_offset + m->vmp_offset;
+                               }
                                vm_object_unlock(object);
 
                                vm_map_unlock_read(map);
@@ -4093,7 +4063,7 @@ FastPmapEnter:
                                        (void)pmap_enter_options(
                                                pmap, vaddr, 0, 0, 0, 0, 0,
                                                PMAP_OPTIONS_NOENTER, NULL);
-                                       
+
                                        need_retry = FALSE;
                                        goto RetryFault;
                                }
@@ -4118,14 +4088,14 @@ FastPmapEnter:
                                 */
                                break;
                        }
-                       
+
                        /*
                         * This is now a shadow based copy on write
                         * fault -- it requires a copy up the shadow
                         * chain.
                         */
                        assert(m_object == VM_PAGE_OBJECT(m));
-                       
+
                        if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
                            VM_FAULT_NEED_CS_VALIDATION(NULL, m, m_object)) {
                                goto upgrade_for_validation;
@@ -4137,7 +4107,7 @@ FastPmapEnter:
                         * need to remember current page, as it's the
                         * source of the copy.
                         *
-                        * at this point we hold locks on both 
+                        * at this point we hold locks on both
                         * object and cur_object... no need to take
                         * paging refs or mark pages BUSY since
                         * we don't drop either object lock until
@@ -4168,10 +4138,10 @@ FastPmapEnter:
                        /*
                         * Now cope with the source page and object
                         */
-                       if (object->ref_count > 1 && cur_m->pmapped)
+                       if (object->ref_count > 1 && cur_m->vmp_pmapped)
                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
-                       
-                       if (cur_m->clustered) {
+
+                       if (cur_m->vmp_clustered) {
                                VM_PAGE_COUNT_AS_PAGEIN(cur_m);
                                VM_PAGE_CONSUME_CLUSTERED(cur_m);
                                vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
@@ -4311,7 +4281,7 @@ FastPmapEnter:
                                                         * at the top level of the object chain
                                                         */
                                                        vm_object_lock(object);
-                                                       
+
                                                        continue;
                                                }
                                        }
@@ -4361,7 +4331,7 @@ FastPmapEnter:
                                                m = VM_PAGE_NULL;
                                                break;
                                        }
-                                       m->dirty = TRUE;
+                                       m->vmp_dirty = TRUE;
 
                                        /*
                                         * If the object is purgeable, its
@@ -4389,22 +4359,25 @@ FastPmapEnter:
                                                 * no ledger update in that
                                                 * case.
                                                 */
-                                       } else if ((cur_object->purgable ==
-                                                   VM_PURGABLE_DENY) ||
-                                                  (cur_object->vo_purgeable_owner ==
+                                       } else if (((cur_object->purgable ==
+                                                    VM_PURGABLE_DENY) &&
+                                                   (!cur_object->vo_ledger_tag)) ||
+                                                  (cur_object->vo_owner ==
                                                    NULL)) {
                                                /*
                                                 * "cur_object" is not purgeable
-                                                * or is not owned, so no
-                                                * purgeable ledgers to update.
+                                                * and is not ledger-tagged, or
+                                                * there's no owner for it,
+                                                * so no owner's ledgers to
+                                                * update.
                                                 */
                                        } else {
                                                /*
                                                 * One less compressed
-                                                * purgeable page for
+                                                * purgeable/tagged page for
                                                 * cur_object's owner.
                                                 */
-                                               vm_purgeable_compressed_update(
+                                               vm_object_owner_compressed_update(
                                                        cur_object,
                                                        -1);
                                        }
@@ -4472,16 +4445,6 @@ FastPmapEnter:
                                        kr = KERN_MEMORY_ERROR;
                                        goto done;
                                }
-                               if (vm_backing_store_low) {
-                                       /*
-                                        * we are protecting the system from
-                                        * backing store exhaustion... 
-                                        * must take the slow path if we're
-                                        * not privileged
-                                        */
-                                       if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV))
-                                               break;
-                               }
                                if (cur_object != object) {
                                        vm_object_unlock(cur_object);
 
@@ -4519,7 +4482,7 @@ FastPmapEnter:
 
                                /*
                                 * Now zero fill page...
-                                * the page is probably going to 
+                                * the page is probably going to
                                 * be written soon, so don't bother
                                 * to clear the modified bit
                                 *
@@ -4583,8 +4546,8 @@ handle_copy_delay:
                vm_map_unlock(real_map);
 
        if (__improbable(object == compressor_object ||
-                        object == kernel_object ||
-                        object == vm_submap_object)) {
+               object == kernel_object ||
+               object == vm_submap_object)) {
                /*
                 * These objects are explicitly managed and populated by the
                 * kernel.  The virtual ranges backed by these objects should
@@ -4637,7 +4600,7 @@ handle_copy_delay:
         *
         *      the object is returned locked with a paging reference
         *
-        *      if top_page != NULL, then it's BUSY and the 
+        *      if top_page != NULL, then it's BUSY and the
         *      object it belongs to has a paging reference
         *      but is returned unlocked
         */
@@ -4653,7 +4616,7 @@ handle_copy_delay:
                 */
                switch (kr) {
                case VM_FAULT_MEMORY_SHORTAGE:
-                       if (vm_page_wait((change_wiring) ? 
+                       if (vm_page_wait((change_wiring) ?
                                         THREAD_UNINT :
                                         THREAD_ABORTSAFE))
                                goto RetryFault;
@@ -4701,22 +4664,47 @@ handle_copy_delay:
        }                                               \
        MACRO_END
 
+
+       object_locks_dropped = FALSE;
        /*
         * We must verify that the maps have not changed
-        * since our last lookup.
+        * since our last lookup. vm_map_verify() needs the
+        * map lock (shared) but we are holding object locks.
+        * So we do a try_lock() first and, if that fails, we
+        * drop the object locks and go in for the map lock again.
         */
-       if (m != VM_PAGE_NULL) {
-               old_copy_object = m_object->copy;
-               vm_object_unlock(m_object);
-       } else {
-               old_copy_object = VM_OBJECT_NULL;
-               vm_object_unlock(object);
+       if (!vm_map_try_lock_read(original_map)) {
+
+               if (m != VM_PAGE_NULL) {
+                       old_copy_object = m_object->copy;
+                       vm_object_unlock(m_object);
+               } else {
+                       old_copy_object = VM_OBJECT_NULL;
+                       vm_object_unlock(object);
+               }
+
+               object_locks_dropped = TRUE;
+
+               vm_map_lock_read(original_map);
        }
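/*
 * A minimal user-space sketch of the "try, then drop and block" ordering
 * used above, written with pthreads rather than the VM map and object locks
 * (hypothetical names, not the kernel API).  The point is that the blocking
 * acquisition of the outer lock only happens after the inner lock has been
 * released, so the thread never sleeps on map_lock while still holding
 * object_lock.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns a flag telling the caller it must re-validate its cached state */
static bool
lock_map_shared_with_object_held(void)
{
        bool object_lock_dropped = false;

        /* the caller already holds object_lock at this point */
        if (pthread_rwlock_tryrdlock(&map_lock) != 0) {
                /* contended: release the inner lock before blocking */
                pthread_mutex_unlock(&object_lock);
                object_lock_dropped = true;

                pthread_rwlock_rdlock(&map_lock);
                pthread_mutex_lock(&object_lock);
        }
        return object_lock_dropped;
}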
 
-       /*
-        * no object locks are held at this point
-        */
        if ((map != original_map) || !vm_map_verify(map, &version)) {
+
+               if (object_locks_dropped == FALSE) {
+                       if (m != VM_PAGE_NULL) {
+                               old_copy_object = m_object->copy;
+                               vm_object_unlock(m_object);
+                       } else {
+                               old_copy_object = VM_OBJECT_NULL;
+                               vm_object_unlock(object);
+                       }
+
+                       object_locks_dropped = TRUE;
+               }
+
+               /*
+                * no object locks are held at this point
+                */
                vm_object_t             retry_object;
                vm_object_offset_t      retry_offset;
                vm_prot_t               retry_prot;
@@ -4731,7 +4719,6 @@ handle_copy_delay:
                 * take another fault.
                 */
                map = original_map;
-               vm_map_lock_read(map);
 
                kr = vm_map_lookup_locked(&map, vaddr,
                                          fault_type & ~VM_PROT_WRITE,
@@ -4813,20 +4800,31 @@ handle_copy_delay:
                 * Check whether the protection has changed or the object
                 * has been copied while we left the map unlocked.
                 */
-               prot &= retry_prot;
+               if (pmap_has_prot_policy(retry_prot)) {
+                       /* If the pmap layer cares, pass the full set. */
+                       prot = retry_prot;
+               } else {
+                       prot &= retry_prot;
+               }
        }
-       if (m != VM_PAGE_NULL) {
-               vm_object_lock(m_object);
 
-               if (m_object->copy != old_copy_object) {
-                       /*
-                        * The copy object changed while the top-level object
-                        * was unlocked, so take away write permission.
-                        */
-                       prot &= ~VM_PROT_WRITE;
-               }
-       } else
-               vm_object_lock(object);
+       if (object_locks_dropped == TRUE) {
+               if (m != VM_PAGE_NULL) {
+                       vm_object_lock(m_object);
+
+                       if (m_object->copy != old_copy_object) {
+                               /*
+                                * The copy object changed while the top-level object
+                                * was unlocked, so take away write permission.
+                                */
+                               assert(!pmap_has_prot_policy(prot));
+                               prot &= ~VM_PROT_WRITE;
+                       }
+               } else
+                       vm_object_lock(object);
+
+               object_locks_dropped = FALSE;
+       }
 
        /*
         * If we want to wire down this page, but no longer have
@@ -4834,7 +4832,7 @@ handle_copy_delay:
         */
        if (wired && (fault_type != (prot | VM_PROT_WRITE))) {
 
-               vm_map_verify_done(map, &version);
+               vm_map_unlock_read(map);
                if (real_map != map)
                        vm_map_unlock(real_map);
 
@@ -4867,10 +4865,8 @@ handle_copy_delay:
                                            caller_prot,
                                            wired,
                                            change_wiring,
-                                           fault_info.no_cache,
-                                           fault_info.cs_bypass,
-                                           fault_info.user_tag,
-                                           fault_info.pmap_options,
+                                           wire_tag,
+                                           &fault_info,
                                            NULL,
                                            &type_of_fault);
                } else {
@@ -4881,10 +4877,8 @@ handle_copy_delay:
                                            caller_prot,
                                            wired,
                                            change_wiring,
-                                           fault_info.no_cache,
-                                           fault_info.cs_bypass,
-                                           fault_info.user_tag,
-                                           fault_info.pmap_options,
+                                           wire_tag,
+                                           &fault_info,
                                            NULL,
                                            &type_of_fault);
                }
@@ -4896,19 +4890,19 @@ handle_copy_delay:
 
                if (m_object->internal)
                        event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
-               else if (m_object->object_slid)
+               else if (m_object->object_is_shared_cache)
                        event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
                else
                        event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
 
-               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
+               KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
 
-               DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
-               }
+               DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
+       }
 #endif
                if (kr != KERN_SUCCESS) {
                        /* abort this page fault */
-                       vm_map_verify_done(map, &version);
+                       vm_map_unlock_read(map);
                        if (real_map != map)
                                vm_map_unlock(real_map);
                        PAGE_WAKEUP_DONE(m);
@@ -4921,7 +4915,7 @@ handle_copy_delay:
                        *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
                        if (prot & VM_PROT_WRITE) {
                                vm_object_lock_assert_exclusive(m_object);
-                               m->dirty = TRUE;
+                               m->vmp_dirty = TRUE;
                        }
                }
        } else {
@@ -4930,33 +4924,11 @@ handle_copy_delay:
                vm_map_offset_t         laddr;
                vm_map_offset_t         ldelta, hdelta;
 
-               /* 
+               /*
                 * do a pmap block mapping from the physical address
-                * in the object 
+                * in the object
                 */
 
-#ifdef ppc
-               /* While we do not worry about execution protection in   */
-               /* general, certian pages may have instruction execution */
-               /* disallowed.  We will check here, and if not allowed   */
-               /* to execute, we return with a protection failure.      */
-
-               if ((fault_type & VM_PROT_EXECUTE) &&
-                       (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) {
-
-                       vm_map_verify_done(map, &version);
-
-                       if (real_map != map)
-                               vm_map_unlock(real_map);
-
-                       vm_fault_cleanup(object, top_page);
-                       vm_object_deallocate(object);
-
-                       kr = KERN_PROTECTION_FAILURE;
-                       goto done;
-               }
-#endif /* ppc */
-
                if (real_map != map)
                        vm_map_unlock(real_map);
 
@@ -4977,8 +4949,8 @@ handle_copy_delay:
                        if (hdelta > (entry->vme_end - laddr))
                                hdelta = entry->vme_end - laddr;
                        if (entry->is_sub_map) {
-                               
-                               laddr = ((laddr - entry->vme_start) 
+
+                               laddr = ((laddr - entry->vme_start)
                                         + VME_OFFSET(entry));
                                vm_map_lock_read(VME_SUBMAP(entry));
 
@@ -4989,13 +4961,13 @@ handle_copy_delay:
                                        real_map = VME_SUBMAP(entry);
                                }
                                map = VME_SUBMAP(entry);
-                               
+
                        } else {
                                break;
                        }
                }
 
-               if (vm_map_lookup_entry(map, laddr, &entry) && 
+               if (vm_map_lookup_entry(map, laddr, &entry) &&
                    (VME_OBJECT(entry) != NULL) &&
                    (VME_OBJECT(entry) == object)) {
                        int superpage;
@@ -5025,37 +4997,63 @@ handle_copy_delay:
                                 * Set up a block mapped area
                                 */
                                assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
-                               pmap_map_block(caller_pmap, 
-                                              (addr64_t)(caller_pmap_addr - ldelta), 
-                                              (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
-                                                         VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
-                                              (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, 
-                                              (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
-                       } else { 
+                               kr = pmap_map_block(caller_pmap,
+                                                   (addr64_t)(caller_pmap_addr - ldelta),
+                                                   (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
+                                                              VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
+                                                   (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
+                                                   (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
+
+                               if (kr != KERN_SUCCESS) {
+                                       goto cleanup;
+                               }
+                       } else {
                                /*
                                 * Set up a block mapped area
                                 */
                                assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
-                               pmap_map_block(real_map->pmap, 
-                                              (addr64_t)(vaddr - ldelta), 
-                                              (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
-                                                         VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
-                                              (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, 
-                                              (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
+                               kr = pmap_map_block(real_map->pmap,
+                                                   (addr64_t)(vaddr - ldelta),
+                                                   (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
+                                                              VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
+                                                   (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
+                                                   (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
+
+                               if (kr != KERN_SUCCESS) {
+                                       goto cleanup;
+                               }
                        }
                }
        }
 
+       /*
+        * Success
+        */
+       kr = KERN_SUCCESS;
+
+       /*
+        * TODO: could most of the done cases just use cleanup?
+        */
+cleanup:
        /*
         * Unlock everything, and return
         */
-       vm_map_verify_done(map, &version);
+       vm_map_unlock_read(map);
        if (real_map != map)
                vm_map_unlock(real_map);
 
        if (m != VM_PAGE_NULL) {
                assert(VM_PAGE_OBJECT(m) == m_object);
 
+               if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
+
+                       vm_object_paging_begin(m_object);
+
+                       assert(written_on_object == VM_OBJECT_NULL);
+                       written_on_object = m_object;
+                       written_on_pager = m_object->pager;
+                       written_on_offset = m_object->paging_offset + m->vmp_offset;
+               }
                PAGE_WAKEUP_DONE(m);
 
                vm_fault_cleanup(m_object, top_page);
@@ -5066,7 +5064,6 @@ handle_copy_delay:
 
 #undef RELEASE_PAGE
 
-       kr = KERN_SUCCESS;
 done:
        thread_interrupt_level(interruptible_state);
 
@@ -5092,7 +5089,23 @@ done:
                        }
                }
        }
-       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+
+       if (written_on_object) {
+
+               vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
+
+               vm_object_lock(written_on_object);
+               vm_object_paging_end(written_on_object);
+               vm_object_unlock(written_on_object);
+
+               written_on_object = VM_OBJECT_NULL;
+       }
+
+       if (rtfault) {
+               vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
+       }
+
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
                              ((uint64_t)trace_vaddr >> 32),
                              trace_vaddr,
@@ -5113,6 +5126,7 @@ vm_fault_wire(
        vm_map_t        map,
        vm_map_entry_t  entry,
        vm_prot_t       prot,
+       vm_tag_t        wire_tag,
        pmap_t          pmap,
        vm_map_offset_t pmap_addr,
        ppnum_t         *physpage_p)
@@ -5123,8 +5137,8 @@ vm_fault_wire(
 
        assert(entry->in_transition);
 
-       if ((VME_OBJECT(entry) != NULL) && 
-           !entry->is_sub_map && 
+       if ((VME_OBJECT(entry) != NULL) &&
+           !entry->is_sub_map &&
            VME_OBJECT(entry)->phys_contiguous) {
                return KERN_SUCCESS;
        }
@@ -5135,7 +5149,7 @@ vm_fault_wire(
         *      page tables and such can be locked down as well.
         */
 
-       pmap_pageable(pmap, pmap_addr, 
+       pmap_pageable(pmap, pmap_addr,
                pmap_addr + (end_addr - entry->vme_start), FALSE);
 
        /*
@@ -5144,14 +5158,14 @@ vm_fault_wire(
         */
 
        for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
-               rc = vm_fault_wire_fast(map, va, prot, entry, pmap, 
+               rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
                                        pmap_addr + (va - entry->vme_start),
                                        physpage_p);
                if (rc != KERN_SUCCESS) {
-                       rc = vm_fault_internal(map, va, prot, TRUE, 
+                       rc = vm_fault_internal(map, va, prot, TRUE, wire_tag,
                                               ((pmap == kernel_pmap)
                                                ? THREAD_UNINT
-                                               : THREAD_ABORTSAFE), 
+                                               : THREAD_ABORTSAFE),
                                               pmap,
                                               (pmap_addr +
                                                (va - entry->vme_start)),
@@ -5164,7 +5178,7 @@ vm_fault_wire(
 
                        /* unwire wired pages */
                        tmp_entry.vme_end = va;
-                       vm_fault_unwire(map, 
+                       vm_fault_unwire(map,
                                &tmp_entry, FALSE, pmap, pmap_addr);
 
                        return rc;
@@ -5189,7 +5203,8 @@ vm_fault_unwire(
        vm_map_offset_t va;
        vm_map_offset_t end_addr = entry->vme_end;
        vm_object_t             object;
-       struct vm_object_fault_info fault_info;
+       struct vm_object_fault_info fault_info = {};
+       unsigned int    unwired_pages;
 
        object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
 
@@ -5205,7 +5220,6 @@ vm_fault_unwire(
        fault_info.interruptible = THREAD_UNINT;
        fault_info.behavior = entry->behavior;
        fault_info.user_tag = VME_ALIAS(entry);
-       fault_info.pmap_options = 0;
        if (entry->iokit_acct ||
            (!entry->is_sub_map && !entry->use_pmap)) {
                fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
@@ -5214,10 +5228,8 @@ vm_fault_unwire(
        fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
        fault_info.no_cache = entry->no_cache;
        fault_info.stealth = TRUE;
-       fault_info.io_sync = FALSE;
-       fault_info.cs_bypass = FALSE;
-       fault_info.mark_zf_absent = FALSE;
-       fault_info.batch_pmap_op = FALSE;
+
+       unwired_pages = 0;
 
        /*
         *      Since the pages are wired down, we must be able to
@@ -5228,11 +5240,11 @@ vm_fault_unwire(
 
                if (object == VM_OBJECT_NULL) {
                        if (pmap) {
-                               pmap_change_wiring(pmap, 
+                               pmap_change_wiring(pmap,
                                                   pmap_addr + (va - entry->vme_start), FALSE);
                        }
-                       (void) vm_fault(map, va, VM_PROT_NONE, 
-                                       TRUE, THREAD_UNINT, pmap, pmap_addr);
+                       (void) vm_fault(map, va, VM_PROT_NONE,
+                                       TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
                } else {
                        vm_prot_t       prot;
                        vm_page_t       result_page;
@@ -5240,13 +5252,12 @@ vm_fault_unwire(
                        vm_object_t     result_object;
                        vm_fault_return_t result;
 
-                       if (end_addr - va > (vm_size_t) -1) {
-                               /* 32-bit overflow */
-                               fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
-                       } else {
-                               fault_info.cluster_size = (vm_size_t) (end_addr - va);
-                               assert(fault_info.cluster_size == end_addr - va);
+                       /* cap cluster size at maximum UPL size */
+                       upl_size_t cluster_size;
+                       if (os_sub_overflow(end_addr, va, &cluster_size)) {
+                               cluster_size = 0 - (upl_size_t)PAGE_SIZE;
                        }
+                       fault_info.cluster_size = cluster_size;
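/*
 * Standalone illustration of the clamping idiom above.  os_sub_overflow()
 * is a wrapper around the compiler's __builtin_sub_overflow(); if the
 * 64-bit byte range does not fit in the 32-bit cluster size, the code falls
 * back to the largest page-aligned value rather than silently truncating.
 * The 4 KB page size here is assumed purely for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static uint32_t
clamp_cluster_size(uint64_t end_addr, uint64_t va)
{
        uint32_t cluster_size;

        if (__builtin_sub_overflow(end_addr, va, &cluster_size)) {
                /* too big for 32 bits: use the maximum page-aligned size */
                cluster_size = 0 - EXAMPLE_PAGE_SIZE;
        }
        return cluster_size;
}

int
main(void)
{
        printf("small range: 0x%x\n", clamp_cluster_size(0x10000, 0x8000));
        printf("huge range : 0x%x\n",
            (unsigned)clamp_cluster_size(0x200000000ULL, 0x1000));
        return 0;
}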
 
                        do {
                                prot = VM_PROT_NONE;
@@ -5265,7 +5276,7 @@ vm_fault_unwire(
                                        FALSE, /* page not looked up */
                                        &prot, &result_page, &top_page,
                                        (int *)0,
-                                       NULL, map->no_zero_fill, 
+                                       NULL, map->no_zero_fill,
                                        FALSE, &fault_info);
                        } while (result == VM_FAULT_RETRY);
 
@@ -5275,7 +5286,7 @@ vm_fault_unwire(
                         * move on to the next one in case the remaining pages are mapped from
                         * different objects.  During a forced unmount, the object is terminated
                         * so the alive flag will be false if this happens.  A forced unmount will
-                        * will occur when an external disk is unplugged before the user does an 
+                        * occur when an external disk is unplugged before the user does an
                         * eject, so we don't want to panic in that situation.
                         */
 
@@ -5303,10 +5314,13 @@ vm_fault_unwire(
                                assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
                                       vm_page_fictitious_addr);
                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
+                               if (VM_PAGE_WIRED(result_page)) {
+                                       unwired_pages++;
+                               }
                                VM_PAGE_FREE(result_page);
                        } else {
                                if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr))
-                                       pmap_change_wiring(pmap, 
+                                       pmap_change_wiring(pmap,
                                            pmap_addr + (va - entry->vme_start), FALSE);
 
 
@@ -5314,6 +5328,7 @@ vm_fault_unwire(
                                        vm_page_lockspin_queues();
                                        vm_page_unwire(result_page, TRUE);
                                        vm_page_unlock_queues();
+                                       unwired_pages++;
                                }
                                if(entry->zero_wired_pages) {
                                        pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
@@ -5332,9 +5347,12 @@ vm_fault_unwire(
         *      such may be unwired themselves.
         */
 
-       pmap_pageable(pmap, pmap_addr, 
+       pmap_pageable(pmap, pmap_addr,
                pmap_addr + (end_addr - entry->vme_start), TRUE);
 
+       if (kernel_object == object) {
+           vm_tag_update_size(fault_info.user_tag, -ptoa_64(unwired_pages));
+       }
 }
 
 /*
@@ -5361,7 +5379,8 @@ static kern_return_t
 vm_fault_wire_fast(
        __unused vm_map_t       map,
        vm_map_offset_t va,
-       vm_prot_t       caller_prot,
+       __unused vm_prot_t       caller_prot,
+       vm_tag_t        wire_tag,
        vm_map_entry_t  entry,
        pmap_t          pmap,
        vm_map_offset_t pmap_addr,
@@ -5374,6 +5393,7 @@ vm_fault_wire_fast(
        thread_t                thread = current_thread();
        int                     type_of_fault;
        kern_return_t           kr;
+       struct vm_object_fault_info fault_info = {};
 
        VM_STAT_INCR(faults);
 
@@ -5456,18 +5476,14 @@ vm_fault_wire_fast(
        /*
         *      Look for page in top-level object.  If it's not there or
         *      there's something going on, give up.
-        * ENCRYPTED SWAP: use the slow fault path, since we'll need to
-        * decrypt the page before wiring it down.
         */
        m = vm_page_lookup(object, offset);
-       if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) ||
-           (m->unusual && ( m->error || m->restart || m->absent))) {
+       if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
+           (m->vmp_unusual && ( m->vmp_error || m->vmp_restart || m->vmp_absent))) {
 
                GIVE_UP;
        }
-       ASSERT_PAGE_DECRYPTED(m);
-
-       if (m->fictitious &&
+       if (m->vmp_fictitious &&
            VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
                /*
                 * Guard pages are fictitious pages and are never
@@ -5479,19 +5495,19 @@ vm_fault_wire_fast(
 
        /*
         *      Wire the page down now.  All bail outs beyond this
-        *      point must unwire the page.  
+        *      point must unwire the page.
         */
 
        vm_page_lockspin_queues();
-       vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE);
+       vm_page_wire(m, wire_tag, TRUE);
        vm_page_unlock_queues();
 
        /*
         *      Mark page busy for other threads.
         */
-       assert(!m->busy);
-       m->busy = TRUE;
-       assert(!m->absent);
+       assert(!m->vmp_busy);
+       m->vmp_busy = TRUE;
+       assert(!m->vmp_absent);
 
        /*
         *      Give up if the page is being written and there's a copy object
@@ -5501,6 +5517,13 @@ vm_fault_wire_fast(
                GIVE_UP;
        }
 
+       fault_info.user_tag = VME_ALIAS(entry);
+       fault_info.pmap_options = 0;
+       if (entry->iokit_acct ||
+           (!entry->is_sub_map && !entry->use_pmap)) {
+               fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
+       }
+
        /*
         *      Put this page into the physical map.
         */
@@ -5510,15 +5533,10 @@ vm_fault_wire_fast(
                            pmap_addr,
                            prot,
                            prot,
-                           TRUE,
-                           FALSE,
-                           FALSE,
-                           FALSE,
-                           VME_ALIAS(entry),
-                           ((entry->iokit_acct ||
-                             (!entry->is_sub_map && !entry->use_pmap))
-                            ? PMAP_OPTIONS_ALT_ACCT
-                            : 0),
+                           TRUE,  /* wired */
+                           FALSE, /* change_wiring */
+                           wire_tag,
+                           &fault_info,
                            NULL,
                            &type_of_fault);
        if (kr != KERN_SUCCESS) {
@@ -5538,7 +5556,7 @@ done:
                        *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
                        if (prot & VM_PROT_WRITE) {
                                vm_object_lock_assert_exclusive(object);
-                               m->dirty = TRUE;
+                               m->vmp_dirty = TRUE;
                        }
                } else {
                        *physpage_p = 0;
@@ -5589,7 +5607,7 @@ vm_fault_copy_dst_cleanup(
                vm_page_lockspin_queues();
                vm_page_unwire(page, TRUE);
                vm_page_unlock_queues();
-               vm_object_paging_end(object);   
+               vm_object_paging_end(object);
                vm_object_unlock(object);
        }
 }
@@ -5633,7 +5651,7 @@ vm_fault_copy(
        int                     interruptible)
 {
        vm_page_t               result_page;
-       
+
        vm_page_t               src_page;
        vm_page_t               src_top_page;
        vm_prot_t               src_prot;
@@ -5649,8 +5667,8 @@ vm_fault_copy(
        vm_fault_return_t       result;
 
        vm_map_size_t           part_size;
-       struct vm_object_fault_info fault_info_src;
-       struct vm_object_fault_info fault_info_dst;
+       struct vm_object_fault_info fault_info_src = {};
+       struct vm_object_fault_info fault_info_dst = {};
 
        /*
         * In order not to confuse the clustered pageins, align
@@ -5667,29 +5685,15 @@ vm_fault_copy(
 
        fault_info_src.interruptible = interruptible;
        fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
-       fault_info_src.user_tag  = 0;
-       fault_info_src.pmap_options = 0;
        fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
        fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
-       fault_info_src.no_cache   = FALSE;
        fault_info_src.stealth = TRUE;
-       fault_info_src.io_sync = FALSE;
-       fault_info_src.cs_bypass = FALSE;
-       fault_info_src.mark_zf_absent = FALSE;
-       fault_info_src.batch_pmap_op = FALSE;
 
        fault_info_dst.interruptible = interruptible;
        fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
-       fault_info_dst.user_tag  = 0;
-       fault_info_dst.pmap_options = 0;
        fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
        fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
-       fault_info_dst.no_cache   = FALSE;
        fault_info_dst.stealth = TRUE;
-       fault_info_dst.io_sync = FALSE;
-       fault_info_dst.cs_bypass = FALSE;
-       fault_info_dst.mark_zf_absent = FALSE;
-       fault_info_dst.batch_pmap_op = FALSE;
 
        do { /* while (amount_left > 0) */
                /*
@@ -5706,13 +5710,12 @@ vm_fault_copy(
                vm_object_lock(dst_object);
                vm_object_paging_begin(dst_object);
 
-               if (amount_left > (vm_size_t) -1) {
-                       /* 32-bit overflow */
-                       fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
-               } else {
-                       fault_info_dst.cluster_size = (vm_size_t) amount_left;
-                       assert(fault_info_dst.cluster_size == amount_left);
+               /* cap cluster size at maximum UPL size */
+               upl_size_t cluster_size;
+               if (os_convert_overflow(amount_left, &cluster_size)) {
+                       cluster_size = 0 - (upl_size_t)PAGE_SIZE;
                }
+               fault_info_dst.cluster_size = cluster_size;
 
                XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
                dst_page = VM_PAGE_NULL;
@@ -5763,7 +5766,7 @@ vm_fault_copy(
                 * same, the call to vm_fault_page() for the
                 * destination page will deadlock.  To prevent this we
                 * wire the page so we can drop busy without having
-                * the page daemon steal the page.  We clean up the 
+                * the page daemon steal the page.  We clean up the
                 * top page  but keep the paging reference on the object
                 * holding the dest page so it doesn't go away.
                 */
@@ -5801,24 +5804,22 @@ vm_fault_copy(
                                src_prot = VM_PROT_READ;
                                vm_object_paging_begin(src_object);
 
-                               if (amount_left > (vm_size_t) -1) {
-                                       /* 32-bit overflow */
-                                       fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
-                               } else {
-                                       fault_info_src.cluster_size = (vm_size_t) amount_left;
-                                       assert(fault_info_src.cluster_size == amount_left);
+                               /* cap cluster size at maximum UPL size */
+                               if (os_convert_overflow(amount_left, &cluster_size)) {
+                                       cluster_size = 0 - (upl_size_t)PAGE_SIZE;
                                }
+                               fault_info_src.cluster_size = cluster_size;
 
                                XPR(XPR_VM_FAULT,
                                        "vm_fault_copy(2) -> vm_fault_page\n",
                                        0,0,0,0,0);
                                result_page = VM_PAGE_NULL;
                                result = vm_fault_page(
-                                       src_object, 
+                                       src_object,
                                        vm_object_trunc_page(src_offset),
                                        VM_PROT_READ, FALSE,
                                        FALSE, /* page not looked up */
-                                       &src_prot, 
+                                       &src_prot,
                                        &result_page, &src_top_page,
                                        (int *)0, &error, FALSE,
                                        FALSE, &fault_info_src);
@@ -5860,7 +5861,10 @@ vm_fault_copy(
                        vm_object_unlock(result_page_object);
                }
 
+               vm_map_lock_read(dst_map);
+
                if (!vm_map_verify(dst_map, dst_version)) {
+                       vm_map_unlock_read(dst_map);
                        if (result_page != VM_PAGE_NULL && src_page != dst_page)
                                vm_fault_copy_cleanup(result_page, src_top_page);
                        vm_fault_copy_dst_cleanup(dst_page);
@@ -5872,7 +5876,7 @@ vm_fault_copy(
 
                if (dst_object->copy != old_copy_object) {
                        vm_object_unlock(dst_object);
-                       vm_map_verify_done(dst_map, dst_version);
+                       vm_map_unlock_read(dst_map);
                        if (result_page != VM_PAGE_NULL && src_page != dst_page)
                                vm_fault_copy_cleanup(result_page, src_top_page);
                        vm_fault_copy_dst_cleanup(dst_page);
@@ -5919,7 +5923,7 @@ vm_fault_copy(
                                                  dst_page,
                                                  (vm_offset_t) dst_po,
                                                  (vm_size_t)part_size);
-                               if(!dst_page->dirty){
+                               if(!dst_page->vmp_dirty){
                                        vm_object_lock(dst_object);
                                        SET_PAGE_DIRTY(dst_page, TRUE);
                                        vm_object_unlock(dst_object);
@@ -5936,7 +5940,7 @@ vm_fault_copy(
                                vm_page_copy(result_page, dst_page);
                                vm_object_unlock(result_page_object);
 
-                               if(!dst_page->dirty){
+                               if(!dst_page->vmp_dirty){
                                        vm_object_lock(dst_object);
                                        SET_PAGE_DIRTY(dst_page, TRUE);
                                        vm_object_unlock(dst_object);
@@ -5949,7 +5953,7 @@ vm_fault_copy(
                 *      Unlock everything, and return
                 */
 
-               vm_map_verify_done(dst_map, dst_version);
+               vm_map_unlock_read(dst_map);
 
                if (result_page != VM_PAGE_NULL && src_page != dst_page)
                        vm_fault_copy_cleanup(result_page, src_top_page);
@@ -5963,7 +5967,7 @@ vm_fault_copy(
        RETURN(KERN_SUCCESS);
 #undef RETURN
 
-       /*NOTREACHED*/  
+       /*NOTREACHED*/
 }
 
 #if    VM_FAULT_CLASSIFY
@@ -5996,15 +6000,15 @@ vm_fault_classify(vm_object_t           object,
 
        while (TRUE) {
                m = vm_page_lookup(object, offset);
-               if (m != VM_PAGE_NULL) {                
-                       if (m->busy || m->error || m->restart || m->absent) {
+               if (m != VM_PAGE_NULL) {
+                       if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
                                type = VM_FAULT_TYPE_OTHER;
                                break;
                        }
                        if (((fault_type & VM_PROT_WRITE) == 0) ||
                            ((level == 0) && object->copy == VM_OBJECT_NULL)) {
                                type = VM_FAULT_TYPE_MAP_IN;
-                               break;  
+                               break;
                        }
                        type = VM_FAULT_TYPE_COPY;
                        break;
@@ -6111,33 +6115,23 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
                                return 0;
                        }
 
-                       if (m->laundry || m->busy || m->free_when_done || m->absent || m->error || m->cleaning ||
-                               m->overwriting || m->restart || m->unusual) {
+                       if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || m->vmp_error || m->vmp_cleaning ||
+                               m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
                                return 0;
                        }
 
-                       assert(!m->private);
-                       if (m->private) {
+                       assert(!m->vmp_private);
+                       if (m->vmp_private) {
                                return 0;
                        }
 
-                       assert(!m->fictitious);
-                       if (m->fictitious) {
+                       assert(!m->vmp_fictitious);
+                       if (m->vmp_fictitious) {
                                return 0;
                        }
 
-                       assert(!m->encrypted);
-                       if (m->encrypted) {
-                               return 0;
-                       }
-
-                       assert(!m->encrypted_cleaning);
-                       if (m->encrypted_cleaning) {
-                               return 0;
-                       }
-
-                       assert(m->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
-                       if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+                       assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
+                       if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
                                return 0;
                        }
 
@@ -6169,23 +6163,26 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
 
 }
 
-void
-vm_page_validate_cs_mapped(
-       vm_page_t       page,
-       const void      *kaddr)
+/*
+ * vm_page_validate_cs_fast():
+ * Performs a few quick checks to determine if the page's code signature
+ * really needs to be fully validated.  It could:
+ *     1. have been modified (i.e. automatically tainted),
+ *     2. have already been validated,
+ *     3. have already been found to be tainted,
+ *     4. no longer have a backing store.
+ * Returns FALSE if the page needs to be fully validated.
+ */
+static boolean_t
+vm_page_validate_cs_fast(
+       vm_page_t       page)
 {
-       vm_object_t             object;
-       vm_object_offset_t      offset;
-       memory_object_t         pager;
-       struct vnode            *vnode;
-       boolean_t               validated;
-       unsigned                tainted;
+       vm_object_t     object;
 
-       assert(page->busy);
        object = VM_PAGE_OBJECT(page);
-       vm_object_lock_assert_exclusive(object);
+       vm_object_lock_assert_held(object);
 
-       if (page->wpmapped && !page->cs_tainted) {
+       if (page->vmp_wpmapped && !page->vmp_cs_tainted) {
                /*
                 * This page was mapped for "write" access sometime in the
                 * past and could still be modifiable in the future.
@@ -6193,38 +6190,76 @@ vm_page_validate_cs_mapped(
                 * [ If the page was already found to be "tainted", no
                 * need to re-validate. ]
                 */
-               page->cs_validated = TRUE;
-               page->cs_tainted = TRUE;
+               vm_object_lock_assert_exclusive(object);
+               page->vmp_cs_validated = TRUE;
+               page->vmp_cs_tainted = TRUE;
                if (cs_debug) {
-                       printf("CODESIGNING: vm_page_validate_cs: "
+                       printf("CODESIGNING: %s: "
                               "page %p obj %p off 0x%llx "
                               "was modified\n",
-                              page, object, page->offset);
+                              __FUNCTION__,
+                              page, object, page->vmp_offset);
                }
                vm_cs_validated_dirtied++;
        }
 
-       if (page->cs_validated || page->cs_tainted) {
-               return;
+       if (page->vmp_cs_validated || page->vmp_cs_tainted) {
+               return TRUE;
        }
+       vm_object_lock_assert_exclusive(object);
 
-       vm_cs_validates++;
+#if CHECK_CS_VALIDATION_BITMAP
+       kern_return_t kr;
 
-       assert(object->code_signed);
-       offset = page->offset;
+       kr = vnode_pager_cs_check_validation_bitmap(
+               object->pager,
+               page->vmp_offset + object->paging_offset,
+               CS_BITMAP_CHECK);
+       if (kr == KERN_SUCCESS) {
+               page->vmp_cs_validated = TRUE;
+               page->vmp_cs_tainted = FALSE;
+               vm_cs_bitmap_validated++;
+               return TRUE;
+       }
+#endif /* CHECK_CS_VALIDATION_BITMAP */
 
        if (!object->alive || object->terminating || object->pager == NULL) {
                /*
                 * The object is terminating and we don't have its pager
                 * so we can't validate the data...
                 */
-               return;
+               return TRUE;
        }
+
+       /* we need to really validate this page */
+       vm_object_lock_assert_exclusive(object);
+       return FALSE;
+}
+
+void
+vm_page_validate_cs_mapped_slow(
+       vm_page_t       page,
+       const void      *kaddr)
+{
+       vm_object_t             object;
+       memory_object_offset_t  mo_offset;
+       memory_object_t         pager;
+       struct vnode            *vnode;
+       boolean_t               validated;
+       unsigned                tainted;
+
+       assert(page->vmp_busy);
+       object = VM_PAGE_OBJECT(page);
+       vm_object_lock_assert_exclusive(object);
+
+       vm_cs_validates++;
+
        /*
         * Since we get here to validate a page that was brought in by
         * the pager, we know that this pager is all setup and ready
         * by now.
         */
+       assert(object->code_signed);
        assert(!object->internal);
        assert(object->pager != NULL);
        assert(object->pager_ready);
@@ -6232,26 +6267,43 @@ vm_page_validate_cs_mapped(
        pager = object->pager;
        assert(object->paging_in_progress);
        vnode = vnode_pager_lookup_vnode(pager);
+       mo_offset = page->vmp_offset + object->paging_offset;
 
        /* verify the SHA1 hash for this page */
        tainted = 0;
        validated = cs_validate_range(vnode,
                                      pager,
-                                     (object->paging_offset +
-                                      offset),
+                                     mo_offset,
                                      (const void *)((const char *)kaddr),
                                      PAGE_SIZE_64,
                                      &tainted);
 
        if (tainted & CS_VALIDATE_TAINTED) {
-               page->cs_tainted = TRUE;
+               page->vmp_cs_tainted = TRUE;
        }
        if (tainted & CS_VALIDATE_NX) {
-               page->cs_nx = TRUE;
+               page->vmp_cs_nx = TRUE;
        }
-
        if (validated) {
-               page->cs_validated = TRUE;
+               page->vmp_cs_validated = TRUE;
+       }
+
+#if CHECK_CS_VALIDATION_BITMAP
+       if (page->vmp_cs_validated && !page->vmp_cs_tainted) {
+               vnode_pager_cs_check_validation_bitmap(object->pager,
+                                                      mo_offset,
+                                                      CS_BITMAP_SET);
+       }
+#endif /* CHECK_CS_VALIDATION_BITMAP */
+}
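/*
 * Purely illustrative sketch of what "validate a page against a recorded
 * hash" looks like in miniature.  The kernel path above goes through
 * cs_validate_range() and the code-signature blob attached to the vnode;
 * this just hashes a page-sized buffer with CommonCrypto's (deprecated but
 * still available) CC_SHA1 and compares it with an expected digest.
 */
#include <CommonCrypto/CommonDigest.h>
#include <stdbool.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

static bool
page_matches_digest(const void *page,
    const unsigned char expected[CC_SHA1_DIGEST_LENGTH])
{
        unsigned char actual[CC_SHA1_DIGEST_LENGTH];

        CC_SHA1(page, SKETCH_PAGE_SIZE, actual);
        return memcmp(actual, expected, CC_SHA1_DIGEST_LENGTH) == 0;
}

int
main(void)
{
        static const unsigned char zero_page[SKETCH_PAGE_SIZE];
        unsigned char digest[CC_SHA1_DIGEST_LENGTH];

        CC_SHA1(zero_page, SKETCH_PAGE_SIZE, digest);
        /* a page "validates" iff its current hash equals the recorded one */
        return page_matches_digest(zero_page, digest) ? 0 : 1;
}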
+
+void
+vm_page_validate_cs_mapped(
+       vm_page_t       page,
+       const void      *kaddr)
+{
+       if (!vm_page_validate_cs_fast(page)) {
+               vm_page_validate_cs_mapped_slow(page, kaddr);
        }
 }
 
@@ -6271,55 +6323,20 @@ vm_page_validate_cs(
        object = VM_PAGE_OBJECT(page);
        vm_object_lock_assert_held(object);
 
-       if (page->wpmapped && !page->cs_tainted) {
-               vm_object_lock_assert_exclusive(object);
-
-               /*
-                * This page was mapped for "write" access sometime in the
-                * past and could still be modifiable in the future.
-                * Consider it tainted.
-                * [ If the page was already found to be "tainted", no
-                * need to re-validate. ]
-                */
-               page->cs_validated = TRUE;
-               page->cs_tainted = TRUE;
-               if (cs_debug) {
-                       printf("CODESIGNING: vm_page_validate_cs: "
-                              "page %p obj %p off 0x%llx "
-                              "was modified\n",
-                              page, object, page->offset);
-               }
-               vm_cs_validated_dirtied++;
-       }
-
-       if (page->cs_validated || page->cs_tainted) {
-               return;
-       }
-
-       if (page->slid) {
-               panic("vm_page_validate_cs(%p): page is slid\n", page);
-       }
-       assert(!page->slid);
-
-#if CHECK_CS_VALIDATION_BITMAP 
-       if ( vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page(page->offset + object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) {
-               page->cs_validated = TRUE;
-               page->cs_tainted = FALSE;
-               vm_cs_bitmap_validated++;
+       if (vm_page_validate_cs_fast(page)) {
                return;
        }
-#endif
        vm_object_lock_assert_exclusive(object);
 
        assert(object->code_signed);
-       offset = page->offset;
+       offset = page->vmp_offset;
 
-       busy_page = page->busy;
+       busy_page = page->vmp_busy;
        if (!busy_page) {
                /* keep page busy while we map (and unlock) the VM object */
-               page->busy = TRUE;
+               page->vmp_busy = TRUE;
        }
-       
+
        /*
         * Take a paging reference on the VM object
         * to protect it from collapse or bypass,
@@ -6340,19 +6357,14 @@ vm_page_validate_cs(
                                  &koffset,
                                  &need_unmap);
        if (kr != KERN_SUCCESS) {
-               panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
+               panic("%s: could not map page: 0x%x\n", __FUNCTION__, kr);
        }
        kaddr = CAST_DOWN(vm_offset_t, koffset);
 
        /* validate the mapped page */
-       vm_page_validate_cs_mapped(page, (const void *) kaddr);
+       vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
 
-#if CHECK_CS_VALIDATION_BITMAP 
-       if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) {
-               vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET );
-       }
-#endif
-       assert(page->busy);
+       assert(page->vmp_busy);
        assert(object == VM_PAGE_OBJECT(page));
        vm_object_lock_assert_exclusive(object);
 
@@ -6388,12 +6400,12 @@ vm_page_validate_cs_mapped_chunk(
        *validated_p = FALSE;
        *tainted_p = 0;
 
-       assert(page->busy);
+       assert(page->vmp_busy);
        object = VM_PAGE_OBJECT(page);
        vm_object_lock_assert_exclusive(object);
 
        assert(object->code_signed);
-       offset = page->offset;
+       offset = page->vmp_offset;
 
        if (!object->alive || object->terminating || object->pager == NULL) {
                /*
@@ -6436,3 +6448,98 @@ vm_page_validate_cs_mapped_chunk(
                *tainted_p = tainted;
        }
 }
+
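+/*
+ * Spinlock wrappers serializing access to the global buffer of
+ * real-time fault records.
+ */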
+static void vm_rtfrecord_lock(void) {
+       lck_spin_lock(&vm_rtfr_slock);
+}
+
+static void vm_rtfrecord_unlock(void) {
+       lck_spin_unlock(&vm_rtfr_slock);
+}
+
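+/* Size in bytes of a buffer large enough to hold every fault record. */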
+unsigned int vmrtfaultinfo_bufsz(void) {
+       return (vmrtf_num_records * sizeof(vm_rtfault_record_t));
+}
+
+#include <kern/backtrace.h>
+
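+/*
+ * Log a completed fault into the fault-record ring buffer: start time and
+ * duration, faulting address, the user PC at fault time (when a one-frame
+ * backtrace succeeds), fault type, and the faulting task's unique PID and
+ * thread ID.
+ */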
+static void vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault) {
+       uint64_t fend = mach_continuous_time();
+
+       uint64_t cfpc = 0;
+       uint64_t ctid = cthread->thread_id;
+       uint64_t cupid = get_current_unique_pid();
+
+       uintptr_t bpc = 0;
+       uint32_t bfrs = 0;
+       bool u64 = false;
+
+       /* Capture a single-frame backtrace; this extracts just the program
+        * counter at the point of the fault into "bpc", and should perform no
+        * further user stack traversals, thus avoiding copyin()s and further
+        * faults.
+        */
+       int btr = backtrace_thread_user(cthread, &bpc, 1U, &bfrs, &u64);
+
+       if ((btr == 0) && (bfrs > 0)) {
+               cfpc = bpc;
+       }
+
+       assert((fstart != 0) && fend >= fstart);
+       vm_rtfrecord_lock();
+       assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
+
+       vmrtfrs.vmrtf_total++;
+       vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
+
+       cvmr->rtfabstime = fstart;
+       cvmr->rtfduration = fend - fstart;
+       cvmr->rtfaddr = fault_vaddr;
+       cvmr->rtfpc = cfpc;
+       cvmr->rtftype = type_of_fault;
+       cvmr->rtfupid = cupid;
+       cvmr->rtftid = ctid;
+
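+       /* Wrap the ring index once it passes the last valid slot. */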
+       if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
+               vmrtfrs.vmrtfr_curi = 0;
+       }
+
+       vm_rtfrecord_unlock();
+}
+
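+/*
+ * Copy fault records into the caller-supplied buffer.  A caller only sees
+ * records for its own unique PID, except that on DEVELOPMENT/DEBUG kernels
+ * root may extract records for all tasks.  Returns TRUE if the buffer was
+ * exhausted before the whole ring was scanned.
+ */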
+int vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void *vrecords, int *vmrtfrv) {
+       vm_rtfault_record_t *cvmrd = vrecords;
+       size_t residue = vrecordsz;
+       int numextracted = 0;
+       boolean_t early_exit = FALSE;
+
+       vm_rtfrecord_lock();
+
+       for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
+
+               if (residue < sizeof(vm_rtfault_record_t)) {
+                       early_exit = TRUE;
+                       break;
+               }
+
+               if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
+#if    DEVELOPMENT || DEBUG
+                       if (isroot == FALSE) {
+                               continue;
+                       }
+#else
+                       continue;
+#endif /* DEVELOPMENT || DEBUG */
+               }
+
+               *cvmrd = vmrtfrs.vm_rtf_records[vmfi];
+               cvmrd++;
+               residue -= sizeof(vm_rtfault_record_t);
+               numextracted++;
+       }
+
+       vm_rtfrecord_unlock();
+
+       *vmrtfrv = numextracted;
+       return (early_exit);
+}