git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/x86_64/pmap.c (xnu-4903.221.2)
index 32d345f9c2ba4863c86181a0a54b46afe54ac252..8be1ce0deafbf2d15c95094038431cf9665272ac 100644
 #include <kern/ledger.h>
 #include <kern/mach_param.h>
 
-#include <kern/lock.h>
 #include <kern/kalloc.h>
 #include <kern/spl.h>
 
 #include <i386/tsc.h>
 #include <i386/pmap_internal.h>
 #include <i386/pmap_pcid.h>
+#if CONFIG_VMX
+#include <i386/vmx/vmx_cpu.h>
+#endif
 
 #include <vm/vm_protos.h>
+#include <san/kasan.h>
 
 #include <i386/mp.h>
 #include <i386/mp_desc.h>
 
 #include <pexpert/i386/efi.h>
 
+#if MACH_ASSERT
+int pmap_stats_assert = 1;
+#endif /* MACH_ASSERT */
 
 #ifdef IWANTTODEBUG
 #undef DEBUG
@@ -164,9 +170,15 @@ boolean_t pmap_trace = FALSE;
 
 boolean_t      no_shared_cr3 = DEBUG;          /* TRUE for DEBUG by default */
 
-int nx_enabled = 1;                    /* enable no-execute protection */
+int nx_enabled = 1;                    /* enable no-execute protection -- set during boot */
+
+#if DEBUG || DEVELOPMENT
 int allow_data_exec  = VM_ABI_32;      /* 32-bit apps may execute data by default, 64-bit apps may not */
 int allow_stack_exec = 0;              /* No apps may execute from the stack by default */
+#else /* DEBUG || DEVELOPMENT */
+const int allow_data_exec  = VM_ABI_32;        /* 32-bit apps may execute data by default, 64-bit apps may not */
+const int allow_stack_exec = 0;                /* No apps may execute from the stack by default */
+#endif /* DEBUG || DEVELOPMENT */
 
 const boolean_t cpu_64bit  = TRUE; /* But of course! */
 
@@ -174,7 +186,7 @@ uint64_t max_preemption_latency_tsc = 0;
 
 pv_hashed_entry_t     *pv_hash_table;  /* hash lists */
 
-uint32_t npvhash = 0;
+uint32_t npvhashmask = 0, npvhashbuckets = 0;
 
 pv_hashed_entry_t      pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
 pv_hashed_entry_t      pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
@@ -182,6 +194,8 @@ decl_simple_lock_data(,pv_hashed_free_list_lock)
 decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
 decl_simple_lock_data(,pv_hash_table_lock)
 
+decl_simple_lock_data(,phys_backup_lock)
+
 zone_t         pv_hashed_list_zone;    /* zone of pv_hashed_entry structures */
 
 /*
@@ -191,9 +205,9 @@ zone_t              pv_hashed_list_zone;    /* zone of pv_hashed_entry structures */
  */
 boolean_t      pmap_initialized = FALSE;/* Has pmap_init completed? */
 
-static struct vm_object kptobj_object_store;
-static struct vm_object kpml4obj_object_store;
-static struct vm_object kpdptobj_object_store;
+static struct vm_object kptobj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+static struct vm_object kpml4obj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+static struct vm_object kpdptobj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
 
 /*
 *     Array of physical page attributes for managed pages.
@@ -225,6 +239,7 @@ pmap_t              kernel_pmap;
 struct zone    *pmap_zone;             /* zone of pmap structures */
 
 struct zone    *pmap_anchor_zone;
+struct zone    *pmap_uanchor_zone;
 int            pmap_debug = 0;         /* flag for debugging prints */
 
 unsigned int   inuse_ptepages_count = 0;
@@ -245,12 +260,15 @@ pt_entry_t     *DMAP1, *DMAP2;
 caddr_t         DADDR1;
 caddr_t         DADDR2;
 
-const boolean_t        pmap_disable_kheap_nx = FALSE;
-const boolean_t        pmap_disable_kstack_nx = FALSE;
-extern boolean_t doconstro_override;
+boolean_t      pmap_disable_kheap_nx = FALSE;
+boolean_t      pmap_disable_kstack_nx = FALSE;
 
 extern long __stack_chk_guard[];
 
+static uint64_t pmap_eptp_flags = 0;
+boolean_t pmap_ept_support_ad = FALSE;
+
+
 /*
  *     Map memory at initialization.  The physical addresses being
  *     mapped are not managed and are never unmapped.
@@ -266,12 +284,21 @@ pmap_map(
        vm_prot_t       prot,
        unsigned int    flags)
 {
+       kern_return_t   kr;
        int             ps;
 
        ps = PAGE_SIZE;
        while (start_addr < end_addr) {
-               pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
-                          (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
+               kr = pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
+                               (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
+
+               if (kr != KERN_SUCCESS) {
+                       panic("%s: failed pmap_enter, "
+                             "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x",
+                             __FUNCTION__,
+                             (void *)virt, (void *)start_addr, (void *)end_addr, prot, flags);
+               }
+
                virt += ps;
                start_addr += ps;
        }
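
A minimal call sketch for the updated pmap_map() (illustrative only: fw_virt, fw_base, and FW_SIZE are hypothetical names, and VM_WIMG_IO is just one plausible attribute; the function's return of the next VA is from the xnu implementation, not shown in this hunk):

    /* Map the physical window [fw_base, fw_base + FW_SIZE) at fw_virt
     * during early boot. With this change pmap_map() panics internally
     * if any pmap_enter() fails, so the caller need not check. */
    vm_offset_t next_va = pmap_map(fw_virt,
                                   fw_base,               /* start PA */
                                   fw_base + FW_SIZE,     /* end PA   */
                                   VM_PROT_READ | VM_PROT_WRITE,
                                   VM_WIMG_IO);           /* cache attribute */
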
@@ -286,46 +313,76 @@ extern  vm_offset_t               eHIB;
 extern  vm_offset_t            stext;
 extern  vm_offset_t            etext;
 extern  vm_offset_t            sdata, edata;
-extern  vm_offset_t            sconstdata, econstdata;
+extern  vm_offset_t            sconst, econst;
 
 extern void                    *KPTphys;
 
 boolean_t pmap_smep_enabled = FALSE;
+boolean_t pmap_smap_enabled = FALSE;
 
 void
 pmap_cpu_init(void)
 {
        cpu_data_t      *cdp = current_cpu_datap();
-       /*
-        * Here early in the life of a processor (from cpu_mode_init()).
-        * Ensure global page feature is disabled at this point.
-        */
 
-       set_cr4(get_cr4() &~ CR4_PGE);
+       set_cr4(get_cr4() | CR4_PGE);
 
        /*
         * Initialize the per-cpu, TLB-related fields.
         */
        cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
+       cpu_shadowp(cdp->cpu_number)->cpu_kernel_cr3 = cdp->cpu_kernel_cr3;
        cdp->cpu_active_cr3 = kernel_pmap->pm_cr3;
        cdp->cpu_tlb_invalid = FALSE;
        cdp->cpu_task_map = TASK_MAP_64BIT;
+
        pmap_pcid_configure();
        if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
+               pmap_smep_enabled = TRUE;
+#if    DEVELOPMENT || DEBUG
                boolean_t nsmep;
-               if (!PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
+               if (PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
+                       pmap_smep_enabled = FALSE;
+               }
+#endif
+               if (pmap_smep_enabled) {
                        set_cr4(get_cr4() | CR4_SMEP);
-                       pmap_smep_enabled = TRUE;
+               }
+
+       }
+       if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMAP) {
+               pmap_smap_enabled = TRUE;
+#if DEVELOPMENT || DEBUG
+               boolean_t nsmap;
+               if (PE_parse_boot_argn("-pmap_smap_disable", &nsmap, sizeof(nsmap))) {
+                       pmap_smap_enabled = FALSE;
+               }
+#endif
+               if (pmap_smap_enabled) {
+                       set_cr4(get_cr4() | CR4_SMAP);
                }
        }
 
+#if !MONOTONIC
        if (cdp->cpu_fixed_pmcs_enabled) {
                boolean_t enable = TRUE;
                cpu_pmc_control(&enable);
        }
+#endif /* !MONOTONIC */
 }
 
+static uint32_t pmap_scale_shift(void) {
+       uint32_t scale = 0;
 
+       if (sane_size <= 8*GB) {
+               scale = (uint32_t)(sane_size / (2 * GB));
+       } else if (sane_size <= 32*GB) {
+               scale = 4 + (uint32_t)((sane_size - (8 * GB))/ (4 * GB)); 
+       } else {
+               scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB))/ (8 * GB))); 
+       }
+       return scale;
+}
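
The scaling above can be sanity-checked in isolation. A standalone userland sketch (NPVHASHBUCKETS = 4096 is an assumption here, since pmap_internal.h is not part of this diff) shows the PV-hash sizes this yields, and why the computed default mask always satisfies the ((2^N)-1) check that pmap_bootstrap() performs below; only a bad "npvhash" boot-arg can trip it:

    #include <stdint.h>
    #include <stdio.h>

    #define GB             (1024ULL * 1024 * 1024)
    #define NPVHASHBUCKETS 4096            /* assumed base bucket count */
    #define MIN(a, b)      ((a) < (b) ? (a) : (b))

    static uint32_t pmap_scale_shift_demo(uint64_t sane_size) {
        if (sane_size <= 8 * GB)
            return (uint32_t)(sane_size / (2 * GB));
        if (sane_size <= 32 * GB)
            return 4 + (uint32_t)((sane_size - 8 * GB) / (4 * GB));
        return 10 + (uint32_t)MIN(4, (sane_size - 32 * GB) / (8 * GB));
    }

    int main(void) {
        uint64_t sizes[] = { 4 * GB, 16 * GB, 64 * GB };
        for (int i = 0; i < 3; i++) {
            uint32_t shift = pmap_scale_shift_demo(sizes[i]);
            uint32_t mask  = (NPVHASHBUCKETS << shift) - 1;
            /* mask is ((2^N)-1) by construction */
            printf("%4llu GB -> shift %2u, %u buckets\n",
                   (unsigned long long)(sizes[i] / GB), shift, mask + 1u);
        }
        return 0;   /* prints 16384, 262144, 67108864 buckets */
    }
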
 
 /*
  *     Bootstrap the system enough to run with virtual memory.
@@ -357,15 +414,15 @@ pmap_bootstrap(
        kernel_pmap->nx_enabled = TRUE;
        kernel_pmap->pm_task_map = TASK_MAP_64BIT;
        kernel_pmap->pm_obj = (vm_object_t) NULL;
-       kernel_pmap->dirbase = (pd_entry_t *)((uintptr_t)IdlePTD);
-       kernel_pmap->pm_pdpt = (pd_entry_t *) ((uintptr_t)IdlePDPT);
        kernel_pmap->pm_pml4 = IdlePML4;
+       kernel_pmap->pm_upml4 = IdlePML4;
        kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
-       pmap_pcid_initialize_kernel(kernel_pmap);
+       kernel_pmap->pm_ucr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
+       kernel_pmap->pm_eptp = 0;
 
-       
+       pmap_pcid_initialize_kernel(kernel_pmap);
 
-       current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
+       current_cpu_datap()->cpu_kernel_cr3 = cpu_shadowp(cpu_number())->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
 
        nkpt = NKPT;
        OSAddAtomic(NKPT,  &inuse_ptepages_count);
@@ -410,21 +467,23 @@ pmap_bootstrap(
 
        virtual_avail = va;
 #endif
+       if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof (npvhashmask))) {
+               npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1;
 
-       if (PE_parse_boot_argn("npvhash", &npvhash, sizeof (npvhash))) {
-               if (0 != ((npvhash + 1) & npvhash)) {
-                       kprintf("invalid hash %d, must be ((2^N)-1), "
-                               "using default %d\n", npvhash, NPVHASH);
-                       npvhash = NPVHASH;
-               }
-       } else {
-               npvhash = NPVHASH;
+       }
+
+       npvhashbuckets = npvhashmask + 1;
+
+       if (0 != ((npvhashbuckets) & npvhashmask)) {
+               panic("invalid hash %d, must be ((2^N)-1), "
+                   "using default %d\n", npvhashmask, NPVHASHMASK);
        }
 
        simple_lock_init(&kernel_pmap->lock, 0);
        simple_lock_init(&pv_hashed_free_list_lock, 0);
        simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
        simple_lock_init(&pv_hash_table_lock,0);
+       simple_lock_init(&phys_backup_lock, 0);
 
        pmap_cpu_init();
 
@@ -433,11 +492,14 @@ pmap_bootstrap(
 
        if (pmap_smep_enabled)
                printf("PMAP: Supervisor Mode Execute Protection enabled\n");
+       if (pmap_smap_enabled)
+               printf("PMAP: Supervisor Mode Access Protection enabled\n");
 
 #if    DEBUG
        printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
-       printf("ml_early_random(): 0x%qx\n", ml_early_random());
+       printf("early_random(): 0x%qx\n", early_random());
 #endif
+#if    DEVELOPMENT || DEBUG
        boolean_t ptmp;
        /* Check if the user has requested disabling stack or heap no-execute
         * enforcement. These are "const" variables; that qualifier is cast away
@@ -454,6 +516,7 @@ pmap_bootstrap(
                boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
                *pdknhp = TRUE;
        }
+#endif /* DEVELOPMENT || DEBUG */
 
        boot_args *args = (boot_args *)PE_state.bootArgs;
        if (args->efiMode == kBootArgsEfiMode32) {
@@ -470,18 +533,24 @@ pmap_bootstrap(
         * in the DEBUG kernel) to force the kernel to switch to its own map
         * (and cr3) when control is in kernelspace. The kernel's map does not
         * include (i.e. share) userspace so wild references will cause
-        * a panic. Only copyin and copyout are exempt from this. 
+        * a panic. Only copyin and copyout are exempt from this.
         */
        (void) PE_parse_boot_argn("-no_shared_cr3",
                                  &no_shared_cr3, sizeof (no_shared_cr3));
        if (no_shared_cr3)
                kprintf("Kernel not sharing user map\n");
-               
+
 #ifdef PMAP_TRACES
        if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) {
                kprintf("Kernel traces for pmap operations enabled\n");
-       }       
+       }
 #endif /* PMAP_TRACES */
+
+#if MACH_ASSERT
+       PE_parse_boot_argn("pmap_stats_assert",
+                          &pmap_stats_assert,
+                          sizeof (pmap_stats_assert));
+#endif /* MACH_ASSERT */
 }
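
Several of the knobs above (npvhash, -no_shared_cr3, -pmap_trace, pmap_stats_assert) share one parsing idiom. In isolation it looks like this; a sketch where my_tunable, MY_TUNABLE_DEFAULT, and the validation helper are hypothetical, and only PE_parse_boot_argn() itself is the real KPI:

    uint32_t my_tunable = MY_TUNABLE_DEFAULT;   /* compiled-in default */

    /* PE_parse_boot_argn() copies at most sizeof(my_tunable) bytes and
     * returns TRUE only when "my_tunable=..." is on the boot command
     * line; otherwise the default above is left untouched. */
    if (PE_parse_boot_argn("my_tunable", &my_tunable, sizeof(my_tunable))) {
        if (!my_tunable_is_valid(my_tunable)) {
            my_tunable = MY_TUNABLE_DEFAULT;    /* reject bad values */
        }
    }
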
 
 void
@@ -581,7 +650,7 @@ hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end
 void
 hibernate_rebuild_pmap_structs(void)
 {
-       int32_t                 cindx, eindx, rindx;
+       int32_t                 cindx, eindx, rindx = 0;
        pv_rooted_entry_t       pv_h;
 
        eindx = (int32_t)pmap_npages;
@@ -657,14 +726,13 @@ pmap_init(void)
        pmap_npages = (uint32_t)npages;
 #endif 
        s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
-                        + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1))
+                        + (sizeof (struct pv_hashed_entry_t *) * (npvhashbuckets))
                         + pv_lock_table_size(npages)
-                        + pv_hash_lock_table_size((npvhash+1))
+                        + pv_hash_lock_table_size((npvhashbuckets))
                                + npages);
-
        s = round_page(s);
        if (kernel_memory_allocate(kernel_map, &addr, s, 0,
-                                  KMA_KOBJECT | KMA_PERMANENT)
+                                  KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PMAP)
            != KERN_SUCCESS)
                panic("pmap_init");
 
@@ -674,7 +742,7 @@ pmap_init(void)
        vsize = s;
 
 #if PV_DEBUG
-       if (0 == npvhash) panic("npvhash not initialized");
+       if (0 == npvhashmask) panic("npvhashmask not initialized");
 #endif
 
        /*
@@ -684,13 +752,13 @@ pmap_init(void)
        addr = (vm_offset_t) (pv_head_table + npages);
 
        pv_hash_table = (pv_hashed_entry_t *)addr;
-       addr = (vm_offset_t) (pv_hash_table + (npvhash + 1));
+       addr = (vm_offset_t) (pv_hash_table + (npvhashbuckets));
 
        pv_lock_table = (char *) addr;
        addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
 
        pv_hash_lock_table = (char *) addr;
-       addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1)));
+       addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhashbuckets)));
 
        pmap_phys_attributes = (char *) addr;
 
@@ -738,11 +806,24 @@ pmap_init(void)
         */
 
        zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);
+/* TODO: possible general optimisation: pre-allocate commonly created
+ * level 3/2 pagetables via zones
+ */
+       pmap_uanchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable user anchors");
+       zone_change(pmap_uanchor_zone, Z_NOENCRYPT, TRUE);
+
+       /* The anchor is required to be page aligned. Zone debugging adds
+        * padding which may violate that requirement. Tell the zone
+        * subsystem that alignment is required.
+        */
+
+       zone_change(pmap_uanchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);
 
        s = (vm_size_t) sizeof(struct pv_hashed_entry);
        pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
            4096 * 3 /* LCM x86_64*/, "pv_list");
        zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);
+       zone_change(pv_hashed_list_zone, Z_GZALLOC_EXEMPT, TRUE);
 
        /* create pv entries for kernel pages mapped by low level
           startup code.  these have to exist so we can pmap_remove()
@@ -753,7 +834,7 @@ pmap_init(void)
                pv_rooted_entry_t pv_e;
 
                pv_e = pai_to_pvh(ppn);
-               pv_e->va = vaddr;
+               pv_e->va_and_flags = vaddr;
                vaddr += PAGE_SIZE;
                pv_e->pmap = kernel_pmap;
                queue_init(&pv_e->qlink);
@@ -767,6 +848,11 @@ pmap_init(void)
         * before this is shared with any user.
         */
        pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);
+
+#if CONFIG_VMX
+       pmap_ept_support_ad = vmx_hv_support()  && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE);
+       pmap_eptp_flags = HV_VMX_EPTP_MEMORY_TYPE_WB | HV_VMX_EPTP_WALK_LENGTH(4) | (pmap_ept_support_ad ? HV_VMX_EPTP_ENABLE_AD_FLAGS : 0);
+#endif /* CONFIG_VMX */
 }
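
The pmap_eptp_flags assembled here are later ORed into an EPT pointer; a sketch of that composition, as pmap_create_options() does further down this diff (field meanings per the Intel SDM: bits 2:0 memory type, bits 5:3 page-walk length minus one, bit 6 enables accessed/dirty tracking):

    /* EPTP = 4KB-aligned physical address of the EPT PML4, plus the
     * control bits prepared above. */
    p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4)
               | pmap_eptp_flags;   /* WB memory type, 4-level walk,
                                     * A/D flags if the CPU supports them */
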
 
 static
@@ -775,6 +861,8 @@ void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, b
        pd_entry_t *pdep;
        pt_entry_t *ptep = NULL;
 
+       assert(!is_ept_pmap(npmap));
+
        assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);
 
        for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
@@ -849,9 +937,9 @@ void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, b
  * The now unused level-1 PTE pages are also freed.
  */
 extern ppnum_t vm_kernel_base_page;
-void
-pmap_lowmem_finalize(void)
-{
+static uint32_t constptes = 0, dataptes = 0;
+
+void pmap_lowmem_finalize(void) {
        spl_t           spl;
        int             i;
 
@@ -911,7 +999,10 @@ pmap_lowmem_finalize(void)
         */
        DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
 
-       /* Remove all mappings past the descriptor aliases and low globals */
+       /*
+        * Remove all mappings past the boot-cpu descriptor aliases and low globals.
+        * Non-boot-cpu GDT aliases will be remapped later as needed. 
+        */
        pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
 
        /*
@@ -1011,48 +1102,45 @@ pmap_lowmem_finalize(void)
        }
 
        boolean_t doconstro = TRUE;
-
+#if DEVELOPMENT || DEBUG
        (void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
-
-       if ((sconstdata | econstdata) & PAGE_MASK) {
-               kprintf("Const DATA misaligned 0x%lx 0x%lx\n", sconstdata, econstdata);
-               if ((sconstdata & PAGE_MASK) || (doconstro_override == FALSE))
-                       doconstro = FALSE;
-       }
-
-       if ((sconstdata > edata) || (sconstdata < sdata) || ((econstdata - sconstdata) >= (edata - sdata))) {
-               kprintf("Const DATA incorrect size 0x%lx 0x%lx 0x%lx 0x%lx\n", sconstdata, econstdata, sdata, edata);
-               doconstro = FALSE;
-       }
-
-       if (doconstro)
+#endif
+       if (doconstro) {
+               if (sconst & PAGE_MASK) {
+                       panic("CONST segment misaligned 0x%lx 0x%lx\n",
+                           sconst, econst);
+               }
                kprintf("Marking const DATA read-only\n");
-
+       }
+       
        vm_offset_t dva;
 
        for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
                assert(((sdata | edata) & PAGE_MASK) == 0);
-               if ( (sdata | edata) & PAGE_MASK) {
-                       kprintf("DATA misaligned, 0x%lx, 0x%lx\n", sdata, edata);
-                       break;
-               }
+               pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);
+
+               dpte = *dptep;
+               assert((dpte & INTEL_PTE_VALID));
+               dpte |= INTEL_PTE_NX;
+               pmap_store_pte(dptep, dpte);
+               dataptes++;
+       }
+       assert(dataptes > 0);
 
+       for (dva = sconst; dva < econst; dva += I386_PGBYTES) {
                pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);
 
                dpte = *dptep;
 
                assert((dpte & INTEL_PTE_VALID));
-               if ((dpte & INTEL_PTE_VALID) == 0) {
-                       kprintf("Missing data mapping 0x%lx 0x%lx 0x%lx\n", dva, sdata, edata);
-                       continue;
-               }
-
                dpte |= INTEL_PTE_NX;
-               if (doconstro && (dva >= sconstdata) && (dva < econstdata)) {
-                       dpte &= ~INTEL_PTE_WRITE;
-               }
+               dpte &= ~INTEL_PTE_WRITE;
+               constptes++;
                pmap_store_pte(dptep, dpte);
        }
+
+       assert(constptes > 0);
+
        kernel_segment_command_t * seg;
        kernel_section_t         * sec;
 
@@ -1175,6 +1263,28 @@ pmap_is_empty(
        return TRUE;
 }
 
+void
+hv_ept_pmap_create(void **ept_pmap, void **eptp)
+{
+       pmap_t p;
+
+       if ((ept_pmap == NULL) || (eptp == NULL)) {
+               return;
+       }
+
+       p = pmap_create_options(get_task_ledger(current_task()), 0, (PMAP_CREATE_64BIT | PMAP_CREATE_EPT));
+       if (p == PMAP_NULL) {
+               *ept_pmap = NULL;
+               *eptp = NULL;
+               return;
+       }
+
+       assert(is_ept_pmap(p));
+
+       *ept_pmap = (void*)p;
+       *eptp = (void*)(p->pm_eptp);
+       return;
+}
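
A usage sketch from a hypothetical hypervisor client (the caller and its error handling are illustrative, not part of xnu):

    void *guest_pmap = NULL;
    void *guest_eptp = NULL;

    hv_ept_pmap_create(&guest_pmap, &guest_eptp);
    if (guest_pmap == NULL) {
        return KERN_RESOURCE_SHORTAGE;   /* pmap_create_options() failed */
    }
    /* guest_eptp now holds pm_eptp -- the EPT PML4's physical address
     * ORed with pmap_eptp_flags -- ready to load into a VMCS. */
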
 
 /*
  *     Create and return a physical map.
@@ -1188,19 +1298,19 @@ pmap_is_empty(
  *     the map will be used in software only, and
  *     is bounded by that size.
  */
+
 pmap_t
-pmap_create(
-       ledger_t                ledger,
-           vm_map_size_t       sz,
-           boolean_t           is_64bit)
+pmap_create_options(
+       ledger_t        ledger,
+       vm_map_size_t   sz,
+       int             flags)
 {
        pmap_t          p;
        vm_size_t       size;
        pml4_entry_t    *pml4;
        pml4_entry_t    *kpml4;
 
-       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
-                  (uint32_t) (sz>>32), (uint32_t) sz, is_64bit, 0, 0);
+       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, sz, flags);
 
        size = (vm_size_t) sz;
 
@@ -1212,37 +1322,54 @@ pmap_create(
                return(PMAP_NULL);
        }
 
+       /*
+        *      Return error when unrecognized flags are passed.
+        */
+       if (__improbable((flags & ~(PMAP_CREATE_KNOWN_FLAGS)) != 0)) {
+               return(PMAP_NULL);
+       }
+
        p = (pmap_t) zalloc(pmap_zone);
        if (PMAP_NULL == p)
                panic("pmap_create zalloc");
+
        /* Zero all fields */
        bzero(p, sizeof(*p));
        /* init counts now since we'll be bumping some */
        simple_lock_init(&p->lock, 0);
-#if 00
-       p->stats.resident_count = 0;
-       p->stats.resident_max = 0;
-       p->stats.wired_count = 0;
-#else
        bzero(&p->stats, sizeof (p->stats));
-#endif
+
        p->ref_count = 1;
        p->nx_enabled = 1;
        p->pm_shared = FALSE;
        ledger_reference(ledger);
        p->ledger = ledger;
 
-       p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;;
-       if (pmap_pcid_ncpus)
+       p->pm_task_map = ((flags & PMAP_CREATE_64BIT) ? TASK_MAP_64BIT : TASK_MAP_32BIT);
+
+       p->pagezero_accessible = FALSE;
+
+       if (pmap_pcid_ncpus) {
                pmap_pcid_initialize(p);
+       }
 
        p->pm_pml4 = zalloc(pmap_anchor_zone);
+       p->pm_upml4 = zalloc(pmap_uanchor_zone); //cleanup for EPT
 
        pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);
+       pmap_assert((((uintptr_t)p->pm_upml4) & PAGE_MASK) == 0);
 
        memset((char *)p->pm_pml4, 0, PAGE_SIZE);
+       memset((char *)p->pm_upml4, 0, PAGE_SIZE);
 
-       p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
+       if (flags & PMAP_CREATE_EPT) {
+               p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4) | pmap_eptp_flags;
+               p->pm_cr3 = 0;
+       } else {
+               p->pm_eptp = 0;
+               p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
+               p->pm_ucr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_upml4);
+       }
 
        /* allocate the vm_objs to hold the pdpt, pde and pte pages */
 
@@ -1258,24 +1385,174 @@ pmap_create(
        if (NULL == p->pm_obj)
                panic("pmap_create pte obj");
 
-       /* All pmaps share the kernel's pml4 */
-       pml4 = pmap64_pml4(p, 0ULL);
-       kpml4 = kernel_pmap->pm_pml4;
-       pml4[KERNEL_PML4_INDEX]    = kpml4[KERNEL_PML4_INDEX];
-       pml4[KERNEL_KEXTS_INDEX]   = kpml4[KERNEL_KEXTS_INDEX];
-       pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];
+       if (!(flags & PMAP_CREATE_EPT)) {
+               /* All host pmaps share the kernel's pml4 */
+               pml4 = pmap64_pml4(p, 0ULL);
+               kpml4 = kernel_pmap->pm_pml4;
+               pml4[KERNEL_PML4_INDEX]    = kpml4[KERNEL_PML4_INDEX];
+               pml4[KERNEL_KEXTS_INDEX]   = kpml4[KERNEL_KEXTS_INDEX];
+               pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];
+               pml4[KERNEL_DBLMAP_PML4_INDEX] = kpml4[KERNEL_DBLMAP_PML4_INDEX];
+#if KASAN
+               pml4[KERNEL_KASAN_PML4_INDEX0] = kpml4[KERNEL_KASAN_PML4_INDEX0];
+               pml4[KERNEL_KASAN_PML4_INDEX1] = kpml4[KERNEL_KASAN_PML4_INDEX1];
+#endif
+               pml4_entry_t    *pml4u = pmap64_user_pml4(p, 0ULL);
+               pml4u[KERNEL_DBLMAP_PML4_INDEX] = kpml4[KERNEL_DBLMAP_PML4_INDEX];
+       }
+
+#if MACH_ASSERT
+       p->pmap_stats_assert = TRUE;
+       p->pmap_pid = 0;
+       strlcpy(p->pmap_procname, "<nil>", sizeof (p->pmap_procname));
+#endif /* MACH_ASSERT */
 
-       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
-                  p, is_64bit, 0, 0, 0);
+       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
+                  VM_KERNEL_ADDRHIDE(p));
 
        return(p);
 }
 
+pmap_t
+pmap_create(
+       ledger_t        ledger,
+       vm_map_size_t   sz,
+       boolean_t       is_64bit)
+{
+       return pmap_create_options(ledger, sz, ((is_64bit) ? PMAP_CREATE_64BIT : 0));
+}
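
Call sketch for the two entry points (task_ledger is a placeholder for a real ledger reference):

    /* legacy interface, still supported as a wrapper: */
    pmap_t p64 = pmap_create(task_ledger, 0, TRUE);       /* 64-bit task */

    /* new interface; note that unrecognized flag bits now make
     * pmap_create_options() return PMAP_NULL instead of being
     * silently ignored: */
    pmap_t ept = pmap_create_options(task_ledger, 0,
                                     PMAP_CREATE_64BIT | PMAP_CREATE_EPT);
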
+
+/*
+ * We maintain stats and ledgers so that a task's physical footprint is:
+ * phys_footprint = ((internal - alternate_accounting)
+ *                   + (internal_compressed - alternate_accounting_compressed)
+ *                   + iokit_mapped
+ *                   + purgeable_nonvolatile
+ *                   + purgeable_nonvolatile_compressed
+ *                   + page_table)
+ * where "alternate_accounting" includes "iokit" and "purgeable" memory.
+ */
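
A standalone worked example of this identity (all counts invented; ledger_amount_t is a signed 64-bit quantity in xnu's kern/ledger.h):

    #include <stdint.h>
    typedef int64_t ledger_amount_t;

    int main(void) {
        ledger_amount_t internal = 120, internal_compressed = 30;
        ledger_amount_t iokit_mapped = 8, page_table = 4;
        ledger_amount_t purgeable_nonvolatile = 16;
        ledger_amount_t purgeable_nonvolatile_compressed = 2;
        /* pages already charged via iokit/purgeable, not double-counted: */
        ledger_amount_t alternate_accounting = 24;
        ledger_amount_t alternate_accounting_compressed = 2;

        ledger_amount_t phys_footprint =
              (internal - alternate_accounting)                        /*  96 */
            + (internal_compressed - alternate_accounting_compressed) /* +28 */
            + iokit_mapped                                            /* + 8 */
            + purgeable_nonvolatile                                   /* +16 */
            + purgeable_nonvolatile_compressed                        /* + 2 */
            + page_table;                                             /* + 4 = 154 */
        return (int)phys_footprint;
    }
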
+
+#if MACH_ASSERT
+struct {
+       uint64_t        num_pmaps_checked;
+
+       int             phys_footprint_over;
+       ledger_amount_t phys_footprint_over_total;
+       ledger_amount_t phys_footprint_over_max;
+       int             phys_footprint_under;
+       ledger_amount_t phys_footprint_under_total;
+       ledger_amount_t phys_footprint_under_max;
+
+       int             internal_over;
+       ledger_amount_t internal_over_total;
+       ledger_amount_t internal_over_max;
+       int             internal_under;
+       ledger_amount_t internal_under_total;
+       ledger_amount_t internal_under_max;
+
+       int             internal_compressed_over;
+       ledger_amount_t internal_compressed_over_total;
+       ledger_amount_t internal_compressed_over_max;
+       int             internal_compressed_under;
+       ledger_amount_t internal_compressed_under_total;
+       ledger_amount_t internal_compressed_under_max;
+
+       int             iokit_mapped_over;
+       ledger_amount_t iokit_mapped_over_total;
+       ledger_amount_t iokit_mapped_over_max;
+       int             iokit_mapped_under;
+       ledger_amount_t iokit_mapped_under_total;
+       ledger_amount_t iokit_mapped_under_max;
+
+       int             alternate_accounting_over;
+       ledger_amount_t alternate_accounting_over_total;
+       ledger_amount_t alternate_accounting_over_max;
+       int             alternate_accounting_under;
+       ledger_amount_t alternate_accounting_under_total;
+       ledger_amount_t alternate_accounting_under_max;
+
+       int             alternate_accounting_compressed_over;
+       ledger_amount_t alternate_accounting_compressed_over_total;
+       ledger_amount_t alternate_accounting_compressed_over_max;
+       int             alternate_accounting_compressed_under;
+       ledger_amount_t alternate_accounting_compressed_under_total;
+       ledger_amount_t alternate_accounting_compressed_under_max;
+
+       int             page_table_over;
+       ledger_amount_t page_table_over_total;
+       ledger_amount_t page_table_over_max;
+       int             page_table_under;
+       ledger_amount_t page_table_under_total;
+       ledger_amount_t page_table_under_max;
+
+       int             purgeable_volatile_over;
+       ledger_amount_t purgeable_volatile_over_total;
+       ledger_amount_t purgeable_volatile_over_max;
+       int             purgeable_volatile_under;
+       ledger_amount_t purgeable_volatile_under_total;
+       ledger_amount_t purgeable_volatile_under_max;
+
+       int             purgeable_nonvolatile_over;
+       ledger_amount_t purgeable_nonvolatile_over_total;
+       ledger_amount_t purgeable_nonvolatile_over_max;
+       int             purgeable_nonvolatile_under;
+       ledger_amount_t purgeable_nonvolatile_under_total;
+       ledger_amount_t purgeable_nonvolatile_under_max;
+
+       int             purgeable_volatile_compressed_over;
+       ledger_amount_t purgeable_volatile_compressed_over_total;
+       ledger_amount_t purgeable_volatile_compressed_over_max;
+       int             purgeable_volatile_compressed_under;
+       ledger_amount_t purgeable_volatile_compressed_under_total;
+       ledger_amount_t purgeable_volatile_compressed_under_max;
+
+       int             purgeable_nonvolatile_compressed_over;
+       ledger_amount_t purgeable_nonvolatile_compressed_over_total;
+       ledger_amount_t purgeable_nonvolatile_compressed_over_max;
+       int             purgeable_nonvolatile_compressed_under;
+       ledger_amount_t purgeable_nonvolatile_compressed_under_total;
+       ledger_amount_t purgeable_nonvolatile_compressed_under_max;
+
+       int             network_volatile_over;
+       ledger_amount_t network_volatile_over_total;
+       ledger_amount_t network_volatile_over_max;
+       int             network_volatile_under;
+       ledger_amount_t network_volatile_under_total;
+       ledger_amount_t network_volatile_under_max;
+
+       int             network_nonvolatile_over;
+       ledger_amount_t network_nonvolatile_over_total;
+       ledger_amount_t network_nonvolatile_over_max;
+       int             network_nonvolatile_under;
+       ledger_amount_t network_nonvolatile_under_total;
+       ledger_amount_t network_nonvolatile_under_max;
+
+       int             network_volatile_compressed_over;
+       ledger_amount_t network_volatile_compressed_over_total;
+       ledger_amount_t network_volatile_compressed_over_max;
+       int             network_volatile_compressed_under;
+       ledger_amount_t network_volatile_compressed_under_total;
+       ledger_amount_t network_volatile_compressed_under_max;
+
+       int             network_nonvolatile_compressed_over;
+       ledger_amount_t network_nonvolatile_compressed_over_total;
+       ledger_amount_t network_nonvolatile_compressed_over_max;
+       int             network_nonvolatile_compressed_under;
+       ledger_amount_t network_nonvolatile_compressed_under_total;
+       ledger_amount_t network_nonvolatile_compressed_under_max;
+} pmap_ledgers_drift;
+static void pmap_check_ledgers(pmap_t pmap);
+#else /* MACH_ASSERT */
+static inline void pmap_check_ledgers(__unused pmap_t pmap) {}
+#endif /* MACH_ASSERT */
+       
 /*
  *     Retire the given physical map from service.
  *     Should only be called if the map contains
  *     no valid mappings.
  */
+extern int vm_wired_objects_page_count;
 
 void
 pmap_destroy(pmap_t    p)
@@ -1286,7 +1563,7 @@ pmap_destroy(pmap_t       p)
                return;
 
        PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
-                  p, 0, 0, 0, 0);
+                  VM_KERNEL_ADDRHIDE(p));
 
        PMAP_LOCK(p);
 
@@ -1309,8 +1586,7 @@ pmap_destroy(pmap_t       p)
        PMAP_UNLOCK(p);
 
        if (c != 0) {
-               PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
-                          p, 1, 0, 0, 0);
+               PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END);
                pmap_assert(p == kernel_pmap);
                return; /* still in use */
        }
@@ -1322,6 +1598,7 @@ pmap_destroy(pmap_t       p)
        int inuse_ptepages = 0;
 
        zfree(pmap_anchor_zone, p->pm_pml4);
+       zfree(pmap_uanchor_zone, p->pm_upml4);
 
        inuse_ptepages += p->pm_obj_pml4->resident_page_count;
        vm_object_deallocate(p->pm_obj_pml4);
@@ -1334,11 +1611,12 @@ pmap_destroy(pmap_t     p)
 
        OSAddAtomic(-inuse_ptepages,  &inuse_ptepages_count);
        PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);
+
+       pmap_check_ledgers(p);
        ledger_dereference(p->ledger);
        zfree(pmap_zone, p);
 
-       PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
-                  0, 0, 0, 0, 0);
+       PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END);
 }
 
 /*
@@ -1384,7 +1662,15 @@ pmap_protect(
 /*
  *     Set the physical protection on the
  *     specified range of this map as requested.
- *     Will not increase permissions.
+ *
+ * VERY IMPORTANT: Will *NOT* increase permissions.
+ *     pmap_protect_options() should protect the range against any access types
+ *     that are not in "prot" but it should never grant extra access.
+ *     For example, if "prot" is READ|EXECUTE, that means "remove write
+ *     access" but it does *not* mean "add read and execute" access.
+ *     VM relies on getting soft-faults to enforce extra checks (code
+ *     signing, for example).
+ *     New access permissions are granted via pmap_enter() only.
  */
 void
 pmap_protect_options(
@@ -1401,6 +1687,7 @@ pmap_protect_options(
        vm_map_offset_t orig_sva;
        boolean_t       set_NX;
        int             num_found = 0;
+       boolean_t       is_ept;
 
        pmap_intr_assert();
 
@@ -1411,16 +1698,19 @@ pmap_protect_options(
                pmap_remove_options(map, sva, eva, options);
                return;
        }
+
        PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
-                  map,
-                  (uint32_t) (sva >> 32), (uint32_t) sva,
-                  (uint32_t) (eva >> 32), (uint32_t) eva);
+                  VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(sva),
+                  VM_KERNEL_ADDRHIDE(eva));
 
        if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled)
                set_NX = FALSE;
        else
                set_NX = TRUE;
 
+       is_ept = is_ept_pmap(map);
+
+
        PMAP_LOCK(map);
 
        orig_sva = sva;
@@ -1429,8 +1719,8 @@ pmap_protect_options(
                if (lva > eva)
                        lva = eva;
                pde = pmap_pde(map, sva);
-               if (pde && (*pde & INTEL_PTE_VALID)) {
-                       if (*pde & INTEL_PTE_PS) {
+               if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
+                       if (*pde & PTE_PS) {
                                /* superpage */
                                spte = pde;
                                epte = spte+1; /* excluded */
@@ -1441,18 +1731,31 @@ pmap_protect_options(
                        }
 
                        for (; spte < epte; spte++) {
-                               if (!(*spte & INTEL_PTE_VALID))
+                               if (!(*spte & PTE_VALID_MASK(is_ept)))
                                        continue;
 
-                               if (prot & VM_PROT_WRITE)
-                                       pmap_update_pte(spte, 0, INTEL_PTE_WRITE);
-                               else
-                                       pmap_update_pte(spte, INTEL_PTE_WRITE, 0);
+                               if (is_ept) {
+                                       if (! (prot & VM_PROT_READ)) {
+                                               pmap_update_pte(spte, PTE_READ(is_ept), 0);
+                                       }
+                               }
+                               if (! (prot & VM_PROT_WRITE)) {
+                                       pmap_update_pte(spte, PTE_WRITE(is_ept), 0);
+                               }
+#if DEVELOPMENT || DEBUG
+                               else if ((options & PMAP_OPTIONS_PROTECT_IMMEDIATE) &&
+                                        map == kernel_pmap) {
+                                       pmap_update_pte(spte, 0, PTE_WRITE(is_ept));
+                               }
+#endif /* DEVELOPMENT || DEBUG */
 
-                               if (set_NX)
-                                       pmap_update_pte(spte, 0, INTEL_PTE_NX);
-                               else
-                                       pmap_update_pte(spte, INTEL_PTE_NX, 0);
+                               if (set_NX) {
+                                       if (!is_ept) {
+                                               pmap_update_pte(spte, 0, INTEL_PTE_NX);
+                                       } else {
+                                               pmap_update_pte(spte, INTEL_EPT_EX, 0);
+                                       }
+                               }
                                num_found++;
                        }
                }
@@ -1466,15 +1769,14 @@ pmap_protect_options(
        }
        PMAP_UNLOCK(map);
 
-       PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
-                  0, 0, 0, 0, 0);
+       PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END);
 
 }
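
Concretely, for the "will *NOT* increase permissions" contract documented above (map and va are hypothetical; the trailing options/arg parameters follow this version's prototype):

    /* Downgrade a read/write mapping to read-only: */
    pmap_protect_options(map, va, va + PAGE_SIZE, VM_PROT_READ, 0, NULL);

    /* Calling it again with READ|WRITE does NOT restore write access;
     * the PTE stays read-only and the next write takes a soft fault,
     * which is exactly what lets the VM layer re-run its checks and
     * re-grant access through pmap_enter(): */
    pmap_protect_options(map, va, va + PAGE_SIZE,
                         VM_PROT_READ | VM_PROT_WRITE, 0, NULL);
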
 
 /* Map a (possibly) autogenned block */
-void
+kern_return_t
 pmap_map_block(
-       pmap_t          pmap, 
+       pmap_t          pmap,
        addr64_t        va,
        ppnum_t         pa,
        uint32_t        size,
@@ -1482,19 +1784,38 @@ pmap_map_block(
        int             attr,
        __unused unsigned int   flags)
 {
+       kern_return_t   kr;
+       addr64_t        original_va = va;
        uint32_t        page;
        int             cur_page_size;
 
        if (attr & VM_MEM_SUPERPAGE)
                cur_page_size =  SUPERPAGE_SIZE;
-       else 
+       else
                cur_page_size =  PAGE_SIZE;
 
        for (page = 0; page < size; page+=cur_page_size/PAGE_SIZE) {
-               pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
+               kr = pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
+
+               if (kr != KERN_SUCCESS) {
+                       /*
+                        * This will panic for now, as it is unclear that
+                        * removing the mappings is correct.
+                        */
+                       panic("%s: failed pmap_enter, "
+                             "pmap=%p, va=%#llx, pa=%u, size=%u, prot=%#x, flags=%#x",
+                             __FUNCTION__,
+                             pmap, va, pa, size, prot, flags);
+
+                       pmap_remove(pmap, original_va, va - original_va);
+                       return kr;
+               }
+
                va += cur_page_size;
                pa+=cur_page_size/PAGE_SIZE;
        }
+
+       return KERN_SUCCESS;
 }
 
 kern_return_t
@@ -1508,9 +1829,14 @@ pmap_expand_pml4(
        uint64_t        i;
        ppnum_t         pn;
        pml4_entry_t    *pml4p;
+       boolean_t       is_ept = is_ept_pmap(map);
 
        DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);
 
+       /* With the exception of the kext "basement", the kernel's level 4
+        * pagetables must not be dynamically expanded.
+        */
+       assert(map != kernel_pmap || (vaddr == KERNEL_BASEMENT));
        /*
         *      Allocate a VM page for the pml4 page
         */
@@ -1523,7 +1849,7 @@ pmap_expand_pml4(
         *      put the page into the pmap's obj list so it
         *      can be found later.
         */
-       pn = m->phys_page;
+       pn = VM_PAGE_GET_PHYS_PAGE(m);
        pa = i386_ptob(pn);
        i = pml4idx(map, vaddr);
 
@@ -1533,7 +1859,7 @@ pmap_expand_pml4(
        pmap_zero_page(pn);
 
        vm_page_lockspin_queues();
-       vm_page_wire(m);
+       vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
        vm_page_unlock_queues();
 
        OSAddAtomic(1,  &inuse_ptepages_count);
@@ -1564,7 +1890,7 @@ pmap_expand_pml4(
                     map, map->pm_obj_pml4, vaddr, i);
        }
 #endif
-       vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE);
+       vm_page_insert_wired(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
        vm_object_unlock(map->pm_obj_pml4);
 
        /*
@@ -1573,9 +1899,16 @@ pmap_expand_pml4(
        pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
 
        pmap_store_pte(pml4p, pa_to_pte(pa)
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
+       pml4_entry_t    *upml4p;
+
+       upml4p = pmap64_user_pml4(map, vaddr);
+       pmap_store_pte(upml4p, pa_to_pte(pa)
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
 
        PMAP_UNLOCK(map);
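
The same store is encoded differently for the two pmap flavors; side by side (a sketch: PTE_READ()/PTE_WRITE() select between the INTEL_PTE_* and INTEL_EPT_* bits per their definitions elsewhere in the pmap headers):

    /* host (IA-32e) non-leaf entry: present + user + writable */
    pmap_store_pte(pml4p, pa_to_pte(pa)
                          | PTE_READ(FALSE)      /* INTEL_PTE_VALID */
                          | INTEL_PTE_USER
                          | PTE_WRITE(FALSE));   /* INTEL_PTE_WRITE */

    /* EPT non-leaf entry: read + execute + write; EPT has no
     * user/supervisor bit, so that slot becomes execute permission */
    pmap_store_pte(pml4p, pa_to_pte(pa)
                          | PTE_READ(TRUE)       /* EPT read bit  */
                          | INTEL_EPT_EX
                          | PTE_WRITE(TRUE));    /* EPT write bit */
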
 
@@ -1590,6 +1923,7 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
        uint64_t        i;
        ppnum_t         pn;
        pdpt_entry_t    *pdptp;
+       boolean_t       is_ept = is_ept_pmap(map);
 
        DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);
 
@@ -1612,7 +1946,7 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
         *      put the page into the pmap's obj list so it
         *      can be found later.
         */
-       pn = m->phys_page;
+       pn = VM_PAGE_GET_PHYS_PAGE(m);
        pa = i386_ptob(pn);
        i = pdptidx(map, vaddr);
 
@@ -1622,7 +1956,7 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
        pmap_zero_page(pn);
 
        vm_page_lockspin_queues();
-       vm_page_wire(m);
+       vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
        vm_page_unlock_queues();
 
        OSAddAtomic(1,  &inuse_ptepages_count);
@@ -1653,7 +1987,7 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
                     map, map->pm_obj_pdpt, vaddr, i);
        }
 #endif
-       vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE);
+       vm_page_insert_wired(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
        vm_object_unlock(map->pm_obj_pdpt);
 
        /*
@@ -1662,9 +1996,9 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
        pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
 
        pmap_store_pte(pdptp, pa_to_pte(pa)
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
 
        PMAP_UNLOCK(map);
 
@@ -1696,10 +2030,11 @@ pmap_expand(
        unsigned int options)
 {
        pt_entry_t              *pdp;
-       register vm_page_t      m;
-       register pmap_paddr_t   pa;
+       vm_page_t               m;
+       pmap_paddr_t            pa;
        uint64_t                i;
        ppnum_t                 pn;
+       boolean_t               is_ept = is_ept_pmap(map);
 
 
        /*
@@ -1707,12 +2042,16 @@ pmap_expand(
         * which is for kexts and is in the 512GB immediately below the kernel..
         * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT
         */
-       if (map == kernel_pmap && 
-           !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))
-               panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);
+       if (__improbable(map == kernel_pmap && 
+               !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))) {
+               if ((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0) {
+                       panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);
+               }
+       }
 
 
        while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
+               assert((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0);
                kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
                if (pepkr != KERN_SUCCESS)
                        return pepkr;
@@ -1731,7 +2070,7 @@ pmap_expand(
         *      put the page into the pmap's obj list so it
         *      can be found later.
         */
-       pn = m->phys_page;
+       pn = VM_PAGE_GET_PHYS_PAGE(m);
        pa = i386_ptob(pn);
        i = pdeidx(map, vaddr);
 
@@ -1741,7 +2080,7 @@ pmap_expand(
        pmap_zero_page(pn);
 
        vm_page_lockspin_queues();
-       vm_page_wire(m);
+       vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
        vm_page_unlock_queues();
 
        OSAddAtomic(1,  &inuse_ptepages_count);
@@ -1762,7 +2101,7 @@ pmap_expand(
 
                VM_PAGE_FREE(m);
 
-               OSAddAtomic(-1,  &inuse_ptepages_count);
+               OSAddAtomic(-1,  &inuse_ptepages_count); /* TODO: replace all with inlines */
                PMAP_ZINFO_PFREE(map, PAGE_SIZE);
                return KERN_SUCCESS;
        }
@@ -1773,7 +2112,7 @@ pmap_expand(
                     map, map->pm_obj, vaddr, i);
        }
 #endif
-       vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE);
+       vm_page_insert_wired(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
        vm_object_unlock(map->pm_obj);
 
        /*
@@ -1781,9 +2120,9 @@ pmap_expand(
         */
        pdp = pmap_pde(map, vaddr);
        pmap_store_pte(pdp, pa_to_pte(pa)
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
 
        PMAP_UNLOCK(map);
 
@@ -1800,6 +2139,7 @@ pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
 {
        ppnum_t pn;
        pt_entry_t              *pte;
+       boolean_t               is_ept = is_ept_pmap(pmap);
 
        PMAP_LOCK(pmap);
 
@@ -1812,9 +2152,17 @@ pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
                pte = pmap64_pml4(pmap, vaddr);
 
                pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
+
+               pte = pmap64_user_pml4(pmap, vaddr);
+
+               pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
+
        }
 
        if(pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) {
@@ -1826,9 +2174,9 @@ pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
                pte = pmap64_pdpt(pmap, vaddr);
 
                pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
        }
 
        if(pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) {
@@ -1840,9 +2188,9 @@ pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
                pte = pmap64_pde(pmap, vaddr);
 
                pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
        }
 
        PMAP_UNLOCK(pmap);
@@ -1894,9 +2242,10 @@ void
 pmap_collect(
        pmap_t          p)
 {
-       register pt_entry_t     *pdp, *ptp;
+       pt_entry_t              *pdp, *ptp;
        pt_entry_t              *eptp;
        int                     wired;
+       boolean_t               is_ept;
 
        if (p == PMAP_NULL)
                return;
@@ -1904,6 +2253,8 @@ pmap_collect(
        if (p == kernel_pmap)
                return;
 
+       is_ept = is_ept_pmap(p);
+
        /*
         *      Garbage collect map.
         */
@@ -1913,75 +2264,74 @@ pmap_collect(
             pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
             pdp++)
        {
-          if (*pdp & INTEL_PTE_VALID) {
-             if(*pdp & INTEL_PTE_REF) {
-               pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF);
-               collect_ref++;
-             } else {
-               collect_unref++;
-               ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
-               eptp = ptp + NPTEPG;
+               if (*pdp & PTE_VALID_MASK(is_ept)) {
+                       if (*pdp & PTE_REF(is_ept)) {
+                               pmap_store_pte(pdp, *pdp & ~PTE_REF(is_ept));
+                               collect_ref++;
+                       } else {
+                               collect_unref++;
+                               ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
+                               eptp = ptp + NPTEPG;
 
-               /*
-                * If the pte page has any wired mappings, we cannot
-                * free it.
-                */
-               wired = 0;
-               {
-                   register pt_entry_t *ptep;
-                   for (ptep = ptp; ptep < eptp; ptep++) {
-                       if (iswired(*ptep)) {
-                           wired = 1;
-                           break;
+                               /*
+                                * If the pte page has any wired mappings, we cannot
+                                * free it.
+                                */
+                               wired = 0;
+                               {
+                                       pt_entry_t *ptep;
+                                       for (ptep = ptp; ptep < eptp; ptep++) {
+                                               if (iswired(*ptep)) {
+                                                       wired = 1;
+                                                       break;
+                                               }
+                                       }
+                               }
+                               if (!wired) {
+                                       /*
+                                        * Remove the virtual addresses mapped by this pte page.
+                                        */
+                                               pmap_remove_range(p,
+                                                       pdetova(pdp - (pt_entry_t *)p->dirbase),
+                                                       ptp,
+                                                       eptp);
+
+                                       /*
+                                        * Invalidate the page directory pointer.
+                                        */
+                                       pmap_store_pte(pdp, 0x0);
+
+                                       PMAP_UNLOCK(p);
+
+                                       /*
+                                        * And free the pte page itself.
+                                        */
+                                       {
+                                               vm_page_t m;
+
+                                               vm_object_lock(p->pm_obj);
+
+                                               m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE);
+                                               if (m == VM_PAGE_NULL)
+                                                       panic("pmap_collect: pte page not in object");
+
+                                               vm_object_unlock(p->pm_obj);
+
+                                               VM_PAGE_FREE(m);
+
+                                               OSAddAtomic(-1,  &inuse_ptepages_count);
+                                               PMAP_ZINFO_PFREE(p, PAGE_SIZE);
+                                       }
+
+                                       PMAP_LOCK(p);
+                               }
                        }
-                   }
-               }
-               if (!wired) {
-                   /*
-                    * Remove the virtual addresses mapped by this pte page.
-                    */
-                   pmap_remove_range(p,
-                               pdetova(pdp - (pt_entry_t *)p->dirbase),
-                               ptp,
-                               eptp);
-
-                   /*
-                    * Invalidate the page directory pointer.
-                    */
-                   pmap_store_pte(pdp, 0x0);
-                
-                   PMAP_UNLOCK(p);
-
-                   /*
-                    * And free the pte page itself.
-                    */
-                   {
-                       register vm_page_t m;
-
-                       vm_object_lock(p->pm_obj);
-
-                       m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE);
-                       if (m == VM_PAGE_NULL)
-                           panic("pmap_collect: pte page not in object");
-
-                       vm_object_unlock(p->pm_obj);
-
-                       VM_PAGE_FREE(m);
-
-                       OSAddAtomic(-1,  &inuse_ptepages_count);
-                       PMAP_ZINFO_PFREE(p, PAGE_SIZE);
-                   }
-
-                   PMAP_LOCK(p);
                }
-             }
-          }
        }
 
        PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
        PMAP_UNLOCK(p);
        return;
-
 }
 #endif
 
@@ -2079,7 +2429,7 @@ pmap_list_resident_pages(
 #endif /* MACH_VM_DEBUG */
 
 
-
+#if CONFIG_COREDUMP
 /* temporary workaround */
 boolean_t
 coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
@@ -2095,7 +2445,7 @@ coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
        return TRUE;
 #endif
 }
-
+#endif
 
 boolean_t
 phys_page_exists(ppnum_t pn)
@@ -2121,9 +2471,11 @@ pmap_switch(pmap_t tpmap)
 {
         spl_t  s;
 
+       PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(tpmap));
        s = splhigh();          /* Make sure interruptions are disabled */
-       set_dirbase(tpmap, current_thread());
+       set_dirbase(tpmap, current_thread(), cpu_number());
        splx(s);
+       PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_END);
 }
 
 
@@ -2170,20 +2522,6 @@ pt_fake_zone_info(
        *caller_acct = 1;
 }
 
-static inline void
-pmap_cpuset_NMIPI(cpu_set cpu_mask) {
-       unsigned int cpu, cpu_bit;
-       uint64_t deadline;
-
-       for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
-               if (cpu_mask & cpu_bit)
-                       cpu_NMI_interrupt(cpu);
-       }
-       deadline = mach_absolute_time() + (LockTimeOut);
-       while (mach_absolute_time() < deadline)
-               cpu_pause();
-}
-
 
 void
 pmap_flush_context_init(pmap_flush_context *pfc)
@@ -2192,16 +2530,17 @@ pmap_flush_context_init(pmap_flush_context *pfc)
        pfc->pfc_invalid_global = 0;
 }
 
+extern uint64_t TLBTimeOut;
 void
 pmap_flush(
        pmap_flush_context *pfc)
 {
        unsigned int    my_cpu;
        unsigned int    cpu;
-       unsigned int    cpu_bit;
-       cpu_set         cpus_to_respond = 0;
-       cpu_set         cpus_to_signal = 0;
-       cpu_set         cpus_signaled = 0;
+       cpumask_t       cpu_bit;
+       cpumask_t       cpus_to_respond = 0;
+       cpumask_t       cpus_to_signal = 0;
+       cpumask_t       cpus_signaled = 0;
        boolean_t       flush_self = FALSE;
        uint64_t        deadline;
 
@@ -2211,7 +2550,7 @@ pmap_flush(
        cpus_to_signal = pfc->pfc_cpus;
 
        PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START,
-                           NULL, cpus_to_signal, 0, 0, 0);
+                           NULL, cpus_to_signal);
 
        for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) {
 
@@ -2219,7 +2558,7 @@ pmap_flush(
 
                        cpus_to_signal &= ~cpu_bit;
 
-                       if (!cpu_datap(cpu)->cpu_running)
+                       if (!cpu_is_running(cpu))
                                continue;
 
                        if (pfc->pfc_invalid_global & cpu_bit)
@@ -2249,7 +2588,10 @@ pmap_flush(
 
        if (cpus_to_respond) {
 
-               deadline = mach_absolute_time() + LockTimeOut;
+               deadline = mach_absolute_time() +
+                               (TLBTimeOut ? TLBTimeOut : LockTimeOut);
+               boolean_t is_timeout_traced = FALSE;
+
                /*
                 * Wait for those other cpus to acknowledge
                 */
@@ -2261,7 +2603,7 @@ pmap_flush(
                                 * as appropriate in the PCID case.
                                 */
                                if ((cpus_to_respond & cpu_bit) != 0) {
-                                       if (!cpu_datap(cpu)->cpu_running ||
+                                       if (!cpu_is_running(cpu) ||
                                            cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
                                            !CPU_CR3_IS_ACTIVE(cpu)) {
                                                cpus_to_respond &= ~cpu_bit;
@@ -2274,23 +2616,44 @@ pmap_flush(
                        if (cpus_to_respond && (mach_absolute_time() > deadline)) {
                                if (machine_timeout_suspended())
                                        continue;
-                               pmap_tlb_flush_timeout = TRUE;
-                               orig_acks = NMIPI_acks;
-                               pmap_cpuset_NMIPI(cpus_to_respond);
+                               if (TLBTimeOut == 0) {
+                                       if (is_timeout_traced)
+                                               continue;
+
+                                       PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
+                                                           NULL, cpus_to_signal, cpus_to_respond);
 
-                               panic("TLB invalidation IPI timeout: "
-                                   "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%lx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
-                                   cpus_to_respond, orig_acks, NMIPI_acks);
+                                       is_timeout_traced = TRUE;
+                                       continue;
+                               }
+                               orig_acks = NMIPI_acks;
+                               NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT);
+                               panic("Uninterruptible processor(s): CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx, deadline: %llu",
+                                     cpus_to_respond, orig_acks, NMIPI_acks, deadline);
                        }
                }
        }
+
        PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END,
-                           NULL, cpus_signaled, flush_self, 0, 0);
+                           NULL, cpus_signaled, flush_self);
 
        mp_enable_preemption();
 }
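
For bulk updates, the deferred path amortizes the IPI cost: callers pass PMAP_DELAY_TLB_FLUSH to pmap_flush_tlbs() so it only accumulates target CPUs in the pmap_flush_context, then issue a single pmap_flush() at the end. A minimal sketch of that calling pattern (the loop bounds and range arrays are illustrative, not from this file):

	pmap_flush_context pfc;
	unsigned int i;

	pmap_flush_context_init(&pfc);
	for (i = 0; i < nranges; i++) {
		/* ... edit the PTEs covering [start[i], end[i]) ... */
		pmap_flush_tlbs(pmap, start[i], end[i],
		    PMAP_DELAY_TLB_FLUSH, &pfc);	/* record cpus, no IPIs yet */
	}
	pmap_flush(&pfc);	/* one IPI round for every cpu accumulated above */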
 
 
+static void
+invept(void *eptp)
+{
+       struct {
+               uint64_t eptp;
+               uint64_t reserved;
+       } __attribute__((aligned(16), packed)) invept_descriptor = {(uint64_t)eptp, 0};
+
+       __asm__ volatile("invept (%%rax), %%rcx"
+               : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor)
+               : "cc", "memory");
+}
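
INVEPT takes its invalidation type in a register and a 16-byte, 16-byte-aligned descriptor in memory, and the single-context form only flushes guest-physical translations tagged with that EPTP on the executing logical processor. Hence the EPT branch of pmap_flush_tlbs() below broadcasts it:

	/* as used below for a machine-wide EPT flush */
	mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void *)pmap->pm_eptp);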
+
 /*
  * Called with pmap locked, we:
  *  - scan through per-cpu data to see which other cpus need to flush
@@ -2305,25 +2668,49 @@ void
 pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc)
 {
        unsigned int    cpu;
-       unsigned int    cpu_bit;
-       cpu_set         cpus_to_signal;
+       cpumask_t       cpu_bit;
+       cpumask_t       cpus_to_signal = 0;
        unsigned int    my_cpu = cpu_number();
        pmap_paddr_t    pmap_cr3 = pmap->pm_cr3;
        boolean_t       flush_self = FALSE;
        uint64_t        deadline;
        boolean_t       pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
        boolean_t       need_global_flush = FALSE;
+       uint32_t        event_code;
+       vm_map_offset_t event_startv, event_endv;
+       boolean_t       is_ept = is_ept_pmap(pmap);
 
        assert((processor_avail_count < 2) ||
               (ml_get_interrupts_enabled() && get_preemption_level() != 0));
 
+       if (pmap == kernel_pmap) {
+               event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS);
+               event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv);
+               event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv);
+       } else if (is_ept) {
+               event_code = PMAP_CODE(PMAP__FLUSH_EPT);
+               event_startv = startv;
+               event_endv = endv;
+       } else {
+               event_code = PMAP_CODE(PMAP__FLUSH_TLBS);
+               event_startv = startv;
+               event_endv = endv;
+       }
+
+       PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_START,
+                           VM_KERNEL_UNSLIDE_OR_PERM(pmap), options,
+                           event_startv, event_endv);
+
+       if (is_ept) {
+               mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void*)pmap->pm_eptp);
+               goto out;
+       }
+
        /*
         * Scan other cpus for matching active or task CR3.
         * For idle cpus (with no active map) we mark them invalid but
         * don't signal -- they'll check as they go busy.
         */
-       cpus_to_signal = 0;
-
        if (pmap_pcid_ncpus) {
                if (pmap_is_shared)
                        need_global_flush = TRUE;
@@ -2331,11 +2718,11 @@ pmap_flush_tlbs(pmap_t  pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o
                mfence();
        }
        for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
-               if (!cpu_datap(cpu)->cpu_running)
+               if (!cpu_is_running(cpu))
                        continue;
                uint64_t        cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu);
                uint64_t        cpu_task_cr3 = CPU_GET_TASK_CR3(cpu);
-
+               /* Recall that the shadowed task CR3 is pre-composed. */
                if ((pmap_cr3 == cpu_task_cr3) ||
                    (pmap_cr3 == cpu_active_cr3) ||
                    (pmap_is_shared)) {
@@ -2381,15 +2768,8 @@ pmap_flush_tlbs(pmap_t   pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o
                }
        }
        if ((options & PMAP_DELAY_TLB_FLUSH))
-               return;
+               goto out;
 
-       if (pmap == kernel_pmap) {
-               PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_KERN_TLBS) | DBG_FUNC_START,
-                                   pmap, cpus_to_signal, flush_self, startv, endv);
-       } else {
-               PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
-                                   pmap, cpus_to_signal, flush_self, startv, endv);
-       }
        /*
         * Flush local tlb if required.
         * Do this now to overlap with other processors responding.
@@ -2407,9 +2787,12 @@ pmap_flush_tlbs(pmap_t   pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o
        }
 
        if (cpus_to_signal) {
-               cpu_set cpus_to_respond = cpus_to_signal;
+               cpumask_t       cpus_to_respond = cpus_to_signal;
+
+               deadline = mach_absolute_time() +
+                               (TLBTimeOut ? TLBTimeOut : LockTimeOut);
+               boolean_t is_timeout_traced = FALSE;
 
-               deadline = mach_absolute_time() + LockTimeOut;
                /*
                 * Wait for those other cpus to acknowledge
                 */
@@ -2421,7 +2804,7 @@ pmap_flush_tlbs(pmap_t    pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o
                                 * as appropriate in the PCID case.
                                 */
                                if ((cpus_to_respond & cpu_bit) != 0) {
-                                       if (!cpu_datap(cpu)->cpu_running ||
+                                       if (!cpu_is_running(cpu) ||
                                            cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
                                            !CPU_CR3_IS_ACTIVE(cpu)) {
                                                cpus_to_respond &= ~cpu_bit;
@@ -2434,13 +2817,24 @@ pmap_flush_tlbs(pmap_t  pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o
                        if (cpus_to_respond && (mach_absolute_time() > deadline)) {
                                if (machine_timeout_suspended())
                                        continue;
-                               pmap_tlb_flush_timeout = TRUE;
+                               if (TLBTimeOut == 0) {
+                                       /* cut a tracepoint but don't panic */
+                                       if (is_timeout_traced)
+                                               continue;
+
+                                       PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
+                                                           VM_KERNEL_UNSLIDE_OR_PERM(pmap),
+                                                           cpus_to_signal,
+                                                           cpus_to_respond);
+
+                                       is_timeout_traced = TRUE;
+                                       continue;
+                               }
                                orig_acks = NMIPI_acks;
-                               pmap_cpuset_NMIPI(cpus_to_respond);
 
-                               panic("TLB invalidation IPI timeout: "
-                                   "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%lx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
-                                   cpus_to_respond, orig_acks, NMIPI_acks);
+                               NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT);
+                               panic("TLB invalidation IPI timeout, unresponsive CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx, deadline: %llu",
+                                     cpus_to_respond, orig_acks, NMIPI_acks, deadline);
                        }
                }
        }
@@ -2449,13 +2843,10 @@ pmap_flush_tlbs(pmap_t  pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o
                panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, pmap_cr3: 0x%llx, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, pmap_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
        }
 
-       if (pmap == kernel_pmap) {
-               PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_KERN_TLBS) | DBG_FUNC_END,
-                                   pmap, cpus_to_signal, startv, endv, 0);
-       } else {
-               PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
-                                   pmap, cpus_to_signal, startv, endv, 0);
-       }
+out:
+       PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_END,
+                           VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal,
+                           event_startv, event_endv);
 
 }
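
TLBTimeOut changes the failure mode of the wait loops above: a zero value keeps the LockTimeOut budget but demotes expiry to a one-shot PMAP__FLUSH_TLBS_TO trace event, while a nonzero value enforces the deadline with an NMI and a panic. A condensed restatement of that policy, using the variables declared in pmap_flush_tlbs():

	deadline = mach_absolute_time() +
	    (TLBTimeOut ? TLBTimeOut : LockTimeOut);
	while (cpus_to_respond != 0) {
		/* ... re-scan cpu_tlb_invalid / CPU_CR3_IS_ACTIVE() ... */
		if (mach_absolute_time() > deadline) {
			if (TLBTimeOut == 0) {
				/* cut PMAP__FLUSH_TLBS_TO once, keep waiting */
			} else {
				NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT);
				/* panic, reporting the unresponsive cpu bitmap */
			}
		}
	}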
 
@@ -2466,16 +2857,9 @@ process_pmap_updates(void)
        pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
        if (pmap_pcid_ncpus) {
                pmap_pcid_validate_current();
-               if (cpu_datap(ccpu)->cpu_tlb_invalid_global) {
-                       cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
-                       tlb_flush_global();
-               }
-               else {
-                       cpu_datap(ccpu)->cpu_tlb_invalid_local = FALSE;
-                       flush_tlb_raw();
-               }
-       }
-       else {
+               cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
+               tlb_flush_global();
+       } else {
                current_cpu_datap()->cpu_tlb_invalid = FALSE;
                flush_tlb_raw();
        }
@@ -2486,14 +2870,12 @@ process_pmap_updates(void)
 void
 pmap_update_interrupt(void)
 {
-        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
-                  0, 0, 0, 0, 0);
+        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START);
 
        if (current_cpu_datap()->cpu_tlb_invalid)
                process_pmap_updates();
 
-        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
-                  0, 0, 0, 0, 0);
+        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END);
 }
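
pmap_update_interrupt() is the responder half of the shoot-down handshake: the initiator marks each target's cpu_tlb_invalid and sends an IPI, then polls until the mark clears or the target's CR3 goes inactive. A compressed sketch of the round trip (the IPI send itself lives in i386/mp.c and is elided here):

	/* initiator side (in pmap_flush_tlbs): */
	cpu_datap(cpu)->cpu_tlb_invalid = TRUE;		/* mark the target */
	/* ... send the TLB-flush IPI to 'cpu' ... */

	/* responder side (this handler): */
	if (current_cpu_datap()->cpu_tlb_invalid)
		process_pmap_updates();			/* flush, clear the mark */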
 
 #include <mach/mach_vm.h>      /* mach_vm_region_recurse() */
@@ -2506,6 +2888,8 @@ pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset
        kern_return_t rv = KERN_SUCCESS;
        uint64_t skip4 = 0, skip2 = 0;
 
+       assert(!is_ept_pmap(ipmap));
+
        sv &= ~PAGE_MASK_64;
        ev &= ~PAGE_MASK_64;
        while (cv < ev) {
@@ -2606,3 +2990,217 @@ pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset
        }
        return rv;
 }
+
+#if MACH_ASSERT
+extern int pmap_ledgers_panic;
+extern int pmap_ledgers_panic_leeway;
+
+static void
+pmap_check_ledgers(
+       pmap_t pmap)
+{
+       ledger_amount_t bal;
+       int             pid;
+       char            *procname;
+       boolean_t       do_panic;
+
+       if (pmap->pmap_pid == 0) {
+               /*
+                * This pmap was not or is no longer fully associated
+                * with a task (e.g. the old pmap after a fork()/exec() or
+                * spawn()).  Its "ledger" still points at a task that is
+                * now using a different (and active) address space, so
+                * we can't check that all the pmap ledgers are balanced here.
+                *
+                * If the "pid" is set, that means that we went through
+                * pmap_set_process() in task_terminate_internal(), so
+                * this task's ledger should not have been re-used and
+                * all the pmap ledgers should be back to 0.
+                */
+               return;
+       }
+
+       do_panic = FALSE;
+       pid = pmap->pmap_pid;
+       procname = pmap->pmap_procname;
+
+       pmap_ledgers_drift.num_pmaps_checked++;
+
+#define LEDGER_CHECK_BALANCE(__LEDGER)                                 \
+MACRO_BEGIN                                                            \
+       int panic_on_negative = TRUE;                                   \
+       ledger_get_balance(pmap->ledger,                                \
+                          task_ledgers.__LEDGER,                       \
+                          &bal);                                       \
+       ledger_get_panic_on_negative(pmap->ledger,                      \
+                                    task_ledgers.__LEDGER,             \
+                                    &panic_on_negative);               \
+       if (bal != 0) {                                                 \
+               if (panic_on_negative ||                                \
+                   (pmap_ledgers_panic &&                              \
+                    pmap_ledgers_panic_leeway > 0 &&                   \
+                    (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) ||  \
+                     bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \
+                       do_panic = TRUE;                                \
+               }                                                       \
+               printf("LEDGER BALANCE proc %d (%s) "                   \
+                      "\"%s\" = %lld\n",                               \
+                      pid, procname, #__LEDGER, bal);                  \
+               if (bal > 0) {                                          \
+                       pmap_ledgers_drift.__LEDGER##_over++;           \
+                       pmap_ledgers_drift.__LEDGER##_over_total += bal; \
+                       if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \
+                               pmap_ledgers_drift.__LEDGER##_over_max = bal; \
+                       }                                               \
+               } else if (bal < 0) {                                   \
+                       pmap_ledgers_drift.__LEDGER##_under++;          \
+                       pmap_ledgers_drift.__LEDGER##_under_total += bal; \
+                       if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \
+                               pmap_ledgers_drift.__LEDGER##_under_max = bal; \
+                       }                                               \
+               }                                                       \
+       }                                                               \
+MACRO_END
+
+       LEDGER_CHECK_BALANCE(phys_footprint);
+       LEDGER_CHECK_BALANCE(internal);
+       LEDGER_CHECK_BALANCE(internal_compressed);
+       LEDGER_CHECK_BALANCE(iokit_mapped);
+       LEDGER_CHECK_BALANCE(alternate_accounting);
+       LEDGER_CHECK_BALANCE(alternate_accounting_compressed);
+       LEDGER_CHECK_BALANCE(page_table);
+       LEDGER_CHECK_BALANCE(purgeable_volatile);
+       LEDGER_CHECK_BALANCE(purgeable_nonvolatile);
+       LEDGER_CHECK_BALANCE(purgeable_volatile_compressed);
+       LEDGER_CHECK_BALANCE(purgeable_nonvolatile_compressed);
+       LEDGER_CHECK_BALANCE(network_volatile);
+       LEDGER_CHECK_BALANCE(network_nonvolatile);
+       LEDGER_CHECK_BALANCE(network_volatile_compressed);
+       LEDGER_CHECK_BALANCE(network_nonvolatile_compressed);
+
+       if (do_panic) {
+               if (pmap_ledgers_panic) {
+                       panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
+                             pmap, pid, procname);
+               } else {
+                       printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
+                              pmap, pid, procname);
+               }
+       }
+
+       if (pmap->stats.resident_count != 0 ||
+#if 35156815
+           /*
+            * "wired_count" is unfortunately a bit inaccurate, so let's
+            * tolerate some slight deviation to limit the number of
+            * somewhat-spurious assertion failures.
+            */
+           pmap->stats.wired_count > 10 ||
+#else /* 35156815 */
+           pmap->stats.wired_count != 0 ||
+#endif /* 35156815 */
+           pmap->stats.device != 0 ||
+           pmap->stats.internal != 0 ||
+           pmap->stats.external != 0 ||
+           pmap->stats.reusable != 0 ||
+           pmap->stats.compressed != 0) {
+               if (pmap_stats_assert &&
+                   pmap->pmap_stats_assert) {
+                       panic("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld",
+                             pmap, pid, procname,
+                             pmap->stats.resident_count,
+                             pmap->stats.wired_count,
+                             pmap->stats.device,
+                             pmap->stats.internal,
+                             pmap->stats.external,
+                             pmap->stats.reusable,
+                             pmap->stats.compressed);
+               } else {
+                       printf("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld\n",
+                              pmap, pid, procname,
+                              pmap->stats.resident_count,
+                              pmap->stats.wired_count,
+                              pmap->stats.device,
+                              pmap->stats.internal,
+                              pmap->stats.external,
+                              pmap->stats.reusable,
+                              pmap->stats.compressed);
+               }
+       }
+}
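
The panic strings above identify the caller: this check is meant to run from pmap_destroy(), once the address space is fully torn down, so any nonzero balance is accounting drift rather than live use. A hedged sketch of the call site:

	/* in pmap_destroy(), after the last reference drops */
#if MACH_ASSERT
	pmap_check_ledgers(p);
#endif /* MACH_ASSERT */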
+
+void
+pmap_set_process(
+       pmap_t pmap,
+       int pid,
+       char *procname)
+{
+       if (pmap == NULL)
+               return;
+
+       pmap->pmap_pid = pid;
+       strlcpy(pmap->pmap_procname, procname, sizeof (pmap->pmap_procname));
+       if (pmap_ledgers_panic_leeway) {
+               /*
+                * XXX FBDP
+                * Some processes somehow trigger some issues that make
+                * Some processes can knock the pmap stats and ledgers off
+                * track, triggering assertion failures and ledger panics.
+                * If we're allowing some ledger leeway, turn off these
+                * sanity checks here.  We'll still do a final check in
+                * allowed leeway after the address space has been fully
+                * cleaned up.
+                */
+               pmap->pmap_stats_assert = FALSE;
+               ledger_disable_panic_on_negative(pmap->ledger,
+                                                task_ledgers.phys_footprint);
+               ledger_disable_panic_on_negative(pmap->ledger,
+                                                task_ledgers.internal);
+               ledger_disable_panic_on_negative(pmap->ledger,
+                                                task_ledgers.internal_compressed);
+               ledger_disable_panic_on_negative(pmap->ledger,
+                                                task_ledgers.iokit_mapped);
+               ledger_disable_panic_on_negative(pmap->ledger,
+                                                task_ledgers.alternate_accounting);
+               ledger_disable_panic_on_negative(pmap->ledger,
+                                                task_ledgers.alternate_accounting_compressed);
+       }
+}
+#endif /* MACH_ASSERT */
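
Per the comment in pmap_check_ledgers(), the pid/procname pair is stamped from task_terminate_internal() so a dying task's pmap stays identifiable in the reports above. A hedged sketch of that call (the proc accessors are illustrative):

	/* on the task-termination path */
	pmap_set_process(task->map->pmap,
	    proc_pid(proc), proc_best_name(proc));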
+
+
+#if DEVELOPMENT || DEBUG
+int pmap_pagezero_mitigation = 1;
+#endif
+
+void pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound) {
+#if DEVELOPMENT || DEBUG
+       if (pmap_pagezero_mitigation == 0) {
+               lpmap->pagezero_accessible = FALSE;
+               return;
+       }
+#endif
+       lpmap->pagezero_accessible = ((pmap_smap_enabled == FALSE) && (low_bound < 0x1000));
+       if (lpmap == current_pmap()) {
+               mp_disable_preemption();
+               current_cpu_datap()->cpu_pagezero_mapped = lpmap->pagezero_accessible;
+               mp_enable_preemption();
+       }
+}
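
The mitigation is only needed when SMAP cannot fence off user page zero and the map can actually place pages below 4KiB, so the VM layer should re-advise whenever a map's minimum offset changes. A hedged sketch of a caller:

	/* e.g. after raising or lowering a map's minimum offset */
	pmap_advise_pagezero_range(map->pmap, vm_map_min(map));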
+
+void pmap_verify_noncacheable(uintptr_t vaddr) {
+       pt_entry_t *ptep = NULL;
+       ptep = pmap_pte(kernel_pmap, vaddr);
+       if (ptep == NULL) {
+               panic("pmap_verify_noncacheable: no translation for 0x%lx", vaddr);
+       }
+       /* Non-cacheable OK */
+       if (*ptep & (INTEL_PTE_NCACHE))
+               return;
+       /* Write-combined OK */
+       if (*ptep & (INTEL_PTE_PTA))
+               return;
+       panic("pmap_verify_noncacheable: IO read from a cacheable address? address: 0x%lx, PTE: %p, *PTE: 0x%llx", vaddr, ptep, *ptep);
+}
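
pmap_verify_noncacheable() guards device reads through kernel virtual aliases: any mapping that is neither uncached (INTEL_PTE_NCACHE) nor write-combined (INTEL_PTE_PTA) panics instead of silently returning stale cache lines. A hedged usage sketch (the register base and offset are illustrative):

	/* before touching MMIO through a KVA alias */
	pmap_verify_noncacheable((uintptr_t)regs + offset);
	uint32_t val = *(volatile uint32_t *)((uintptr_t)regs + offset);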
+