diff --git a/osfmk/i386/pmap_x86_common.c b/osfmk/i386/pmap_x86_common.c
index 7994bfbd7a90eaa4224f41a6aa112251f2171618..1bfecd7cb12bc3de9ef6c47cc47fca556a210c59 100644
--- a/osfmk/i386/pmap_x86_common.c
+++ b/osfmk/i386/pmap_x86_common.c
  * 
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
+
+#include <mach_assert.h>
+
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
+#include <vm/vm_kern.h>
 #include <kern/ledger.h>
 #include <i386/pmap_internal.h>
 
@@ -91,6 +95,9 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t
        unsigned int    i;
        uint64_t        num_pde;
 
+       assert(!is_ept_pmap(grand));
+       assert(!is_ept_pmap(subord));
+
        if ((size & (pmap_nesting_size_min-1)) ||
            (va_start & (pmap_nesting_size_min-1)) ||
            (nstart & (pmap_nesting_size_min-1)) ||
@@ -172,11 +179,10 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t
                        i += (uint32_t) NPDEPG;
                }
                else {
-                       npde = pmap_pde(subord, nstart);
+                       npde = pmap_pde(subord, vaddr);
                        if (npde == 0)
-                               panic("pmap_nest: no npde, subord %p nstart 0x%llx", subord, nstart);
+                               panic("pmap_nest: no npde, subord %p vaddr 0x%llx", subord, vaddr);
                        tpde = *npde;
-                       nstart += NBPDE;
                        pde = pmap_pde(grand, vaddr);
                        if ((0 == pde) && cpu_64bit) {
                                PMAP_UNLOCK(grand);
@@ -227,6 +233,8 @@ kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
                    grand, vaddr, size);
        }
 
+       assert(!is_ept_pmap(grand));
+
        /* align everything to PDE boundaries */
        va_start = vaddr & ~(NBPDE-1);
        va_end = (vaddr + size + NBPDE - 1) & ~(NBPDE-1);
@@ -265,6 +273,15 @@ kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
        return KERN_SUCCESS;
 }
 
+kern_return_t
+pmap_unnest_options(
+       pmap_t grand,
+       addr64_t vaddr,
+       __unused uint64_t size,
+       __unused unsigned int options) {
+       return pmap_unnest(grand, vaddr, size);
+}
+
 /* Invoked by the Mach VM to determine the platform specific unnest region */
 
 boolean_t pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) {
@@ -308,6 +325,9 @@ pmap_find_phys(pmap_t pmap, addr64_t va)
        ppnum_t         ppn = 0;
        pd_entry_t      pde;
        pt_entry_t      pte;
+       boolean_t       is_ept;
+
+       is_ept = is_ept_pmap(pmap);
 
        mp_disable_preemption();
 
@@ -321,14 +341,14 @@ pmap_find_phys(pmap_t pmap, addr64_t va)
 
        pdep = pmap_pde(pmap, va);
 
-       if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & INTEL_PTE_VALID)) {
-               if (pde & INTEL_PTE_PS) {
+       if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & PTE_VALID_MASK(is_ept))) {
+               if (pde & PTE_PS) {
                        ppn = (ppnum_t) i386_btop(pte_to_pa(pde));
                        ppn += (ppnum_t) ptenum(va);
                }
                else {
                        ptp = pmap_pte(pmap, va);
-                       if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & INTEL_PTE_VALID) != 0)) {
+                       if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) {
                                ppn = (ppnum_t) i386_btop(pte_to_pa(pte));
                        }
                }
@@ -342,9 +362,13 @@ pfp_exit:
 /*
  * Update cache attributes for all extant managed mappings.
  * Assumes PV for this page is locked, and that the page
- * is managed.
+ * is managed. We assume that this physical page may be mapped in
+ * both EPT and normal Intel PTEs, so we convert the attributes
+ * to the corresponding format for each pmap.
+ *
+ * We assert that the passed set of attributes is a subset of the
+ * PHYS_CACHEABILITY_MASK.
  */
-
 void
 pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
        pv_rooted_entry_t       pv_h, pv_e;
@@ -352,8 +376,17 @@ pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
        vm_map_offset_t vaddr;
        pmap_t  pmap;
        pt_entry_t      *ptep;
+       boolean_t       is_ept;
+       unsigned        ept_attributes;
        
        assert(IS_MANAGED_PAGE(pn));
+       assert(((~PHYS_CACHEABILITY_MASK) & attributes) == 0);
+
+       /* We don't support the PTA bit for EPT PTEs */
+       if (attributes & INTEL_PTE_NCACHE)
+               ept_attributes = INTEL_EPT_NCACHE;
+       else
+               ept_attributes = INTEL_EPT_WB;
 
        pv_h = pai_to_pvh(pn);
        /* TODO: translate the PHYS_* bits to PTE bits, while they're
@@ -372,14 +405,20 @@ pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
 
                do {
                        pmap = pv_e->pmap;
-                       vaddr = pv_e->va;
+                       vaddr = PVE_VA(pv_e);
                        ptep = pmap_pte(pmap, vaddr);
-               
+                       
                        if (0 == ptep)
                                panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);
 
+                       is_ept = is_ept_pmap(pmap);
+
                        nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);
-                       pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);
+                       if (!is_ept) {
+                               pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);
+                       } else {
+                               pmap_update_pte(ptep, INTEL_EPT_CACHE_MASK, ept_attributes);
+                       }
                        PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
                        pvh_e = nexth;
                } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
@@ -415,7 +454,7 @@ void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) {
 
 void
 pmap_enter(
-       register pmap_t         pmap,
+       pmap_t          pmap,
        vm_map_offset_t         vaddr,
        ppnum_t                 pn,
        vm_prot_t               prot,
@@ -429,7 +468,7 @@ pmap_enter(
 
 kern_return_t
 pmap_enter_options(
-       register pmap_t         pmap,
+       pmap_t          pmap,
        vm_map_offset_t         vaddr,
        ppnum_t                 pn,
        vm_prot_t               prot,
@@ -457,12 +496,16 @@ pmap_enter_options(
        uint64_t                delpage_pde_index = 0;
        pt_entry_t              old_pte;
        kern_return_t           kr_expand;
+       boolean_t               is_ept;
+       boolean_t               is_altacct;
 
        pmap_intr_assert();
 
        if (pmap == PMAP_NULL)
                return KERN_INVALID_ARGUMENT;
 
+       is_ept = is_ept_pmap(pmap);
+
        /* N.B. We can be supplied a zero page frame in the NOENTER case, it's an
         * unused value for that scenario.
         */
@@ -504,7 +547,7 @@ Retry:
         *      pmap is always expanded to include enough hardware
         *      pages to map one VM page.
         */
-        if(superpage) {
+        if (superpage) {
                while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
                        /* need room for another pde entry */
                        PMAP_UNLOCK(pmap);
@@ -531,7 +574,7 @@ Retry:
                return KERN_SUCCESS;
        }
 
-       if (superpage && *pte && !(*pte & INTEL_PTE_PS)) {
+       if (superpage && *pte && !(*pte & PTE_PS)) {
                /*
                 * There is still an empty page table mapped that
                 * was used for a previous base page mapping.
@@ -548,9 +591,27 @@ Retry:
        old_pa_locked = FALSE;
 
        if (old_pa == 0 &&
-           (*pte & INTEL_PTE_COMPRESSED)) {
+           PTE_IS_COMPRESSED(*pte)) {
+               /*
+                * "pmap" should be locked at this point, so this should
+                * not race with another pmap_enter() or pmap_remove_range().
+                */
+               assert(pmap != kernel_pmap);
+
                /* one less "compressed" */
                OSAddAtomic64(-1, &pmap->stats.compressed);
+               pmap_ledger_debit(pmap, task_ledgers.internal_compressed,
+                                 PAGE_SIZE);
+               if (*pte & PTE_COMPRESSED_ALT) {
+                       pmap_ledger_debit(
+                               pmap,
+                               task_ledgers.alternate_accounting_compressed,
+                               PAGE_SIZE);
+               } else {
+                       /* was part of the footprint */
+                       pmap_ledger_debit(pmap, task_ledgers.phys_footprint,
+                                         PAGE_SIZE);
+               }
                /* marker will be cleared below */
        }
 
@@ -575,32 +636,58 @@ Retry:
         */
        if (old_pa == pa) {
                pt_entry_t old_attributes =
-                   *pte & ~(INTEL_PTE_REF | INTEL_PTE_MOD);
+                   *pte & ~(PTE_REF(is_ept) | PTE_MOD(is_ept));
 
                /*
                 *      May be changing its wired attribute or protection
                 */
 
-               template = pa_to_pte(pa) | INTEL_PTE_VALID;
-               template |= pmap_get_cache_attributes(pa_index(pa));
+               template =  pa_to_pte(pa);
+
+               /* ?: WORTH ASSERTING THAT AT LEAST ONE RWX (implicit valid) PASSED FOR EPT? */
+               if (!is_ept) {
+                       template |= INTEL_PTE_VALID;
+               } else {
+                       template |= INTEL_EPT_IPTA;
+               }
+
+               template |= pmap_get_cache_attributes(pa_index(pa), is_ept);
 
-               if (VM_MEM_NOT_CACHEABLE ==
-                   (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
+               /*
+                * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
+                */
+               if (!is_ept && (VM_MEM_NOT_CACHEABLE ==
+                   (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)))) {
                        if (!(flags & VM_MEM_GUARDED))
                                template |= INTEL_PTE_PTA;
                        template |= INTEL_PTE_NCACHE;
                }
-               if (pmap != kernel_pmap)
+               if (pmap != kernel_pmap && !is_ept)
                        template |= INTEL_PTE_USER;
+
+               if (prot & VM_PROT_READ)
+                       template |= PTE_READ(is_ept);
+
                if (prot & VM_PROT_WRITE) {
-                       template |= INTEL_PTE_WRITE;
+                       template |= PTE_WRITE(is_ept);
+                       if (is_ept && !pmap_ept_support_ad) {
+                               template |= PTE_MOD(is_ept);
+                               if (old_pa_locked) {
+                                       assert(IS_MANAGED_PAGE(pai));
+                                       pmap_phys_attributes[pai] |= PHYS_MODIFIED;
+                               }
+                       }
+               }
+               if (prot & VM_PROT_EXECUTE) {
+                       assert(set_NX == 0);
+                       template = pte_set_ex(template, is_ept);
                }
 
                if (set_NX)
-                       template |= INTEL_PTE_NX;
+                       template = pte_remove_ex(template, is_ept);
 
                if (wired) {
-                       template |= INTEL_PTE_WIRED;
+                       template |= PTE_WIRED;
                        if (!iswired(old_attributes))  {
                                OSAddAtomic(+1, &pmap->stats.wired_count);
                                pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
@@ -612,26 +699,36 @@ Retry:
                                pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
                        }
                }
+
                if (superpage)          /* this path can not be used */
-                       template |= INTEL_PTE_PS;       /* to change the page size! */
+                       template |= PTE_PS;     /* to change the page size! */
 
                if (old_attributes == template)
                        goto dont_update_pte;
 
                /* Determine delta, PV locked */
                need_tlbflush =
-                   ((old_attributes ^ template) != INTEL_PTE_WIRED);
+                   ((old_attributes ^ template) != PTE_WIRED);
                
-               if (need_tlbflush == TRUE && !(old_attributes & INTEL_PTE_WRITE)) {
-                       if ((old_attributes ^ template) == INTEL_PTE_WRITE)
+               if (need_tlbflush == TRUE && !(old_attributes & PTE_WRITE(is_ept))) {
+                       if ((old_attributes ^ template) == PTE_WRITE(is_ept))
                                need_tlbflush = FALSE;
                }
 
+               /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
+               if (is_ept && !pmap_ept_support_ad) {
+                       template |= PTE_REF(is_ept);
+                       if (old_pa_locked) {
+                               assert(IS_MANAGED_PAGE(pai));
+                               pmap_phys_attributes[pai] |= PHYS_REFERENCED;
+                       }
+               }
+
                /* store modified PTE and preserve RC bits */
                pt_entry_t npte, opte;;
                do {
                        opte = *pte;
-                       npte = template | (opte & (INTEL_PTE_REF | INTEL_PTE_MOD));
+                       npte = template | (opte & (PTE_REF(is_ept) | PTE_MOD(is_ept)));
                } while (!pmap_cmpx_pte(pte, opte, npte));
 dont_update_pte:
                if (old_pa_locked) {
@@ -655,6 +752,7 @@ dont_update_pte:
         */
 
        if (old_pa != (pmap_paddr_t) 0) {
+               boolean_t       was_altacct;
 
                /*
                 *      Don't do anything to pages outside valid memory here.
@@ -663,32 +761,69 @@ dont_update_pte:
                 */
 
                /* invalidate the PTE */
-               pmap_update_pte(pte, INTEL_PTE_VALID, 0);
+               pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);
                /* propagate invalidate everywhere */
                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
                /* remember reference and change */
                old_pte = *pte;
-               oattr = (char) (old_pte & (PHYS_MODIFIED | PHYS_REFERENCED));
+               oattr = (char) (old_pte & (PTE_MOD(is_ept) | PTE_REF(is_ept)));
                /* completely invalidate the PTE */
                pmap_store_pte(pte, 0);
 
+               if (IS_MANAGED_PAGE(pai)) {
+                       /*
+                        *      Remove the mapping from the pvlist for
+                        *      this physical page.
+                        *      We'll end up with either a rooted pv or a
+                        *      hashed pv
+                        */
+                       pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte, &was_altacct);
+               }
+
                if (IS_MANAGED_PAGE(pai)) {
                        pmap_assert(old_pa_locked == TRUE);
                        pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
-                       pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);                        
                        assert(pmap->stats.resident_count >= 1);
                        OSAddAtomic(-1, &pmap->stats.resident_count);
                        if (pmap != kernel_pmap) {
+                               /* update pmap stats */
                                if (IS_REUSABLE_PAGE(pai)) {
-                                       assert(pmap->stats.reusable > 0);
+                                       PMAP_STATS_ASSERTF(
+                                               (pmap->stats.reusable > 0,
+                                                "reusable %d",
+                                                pmap->stats.reusable));
                                        OSAddAtomic(-1, &pmap->stats.reusable);
                                } else if (IS_INTERNAL_PAGE(pai)) {
-                                       assert(pmap->stats.internal > 0);
+                                       PMAP_STATS_ASSERTF(
+                                               (pmap->stats.internal > 0,
+                                                "internal %d",
+                                                pmap->stats.internal));
                                        OSAddAtomic(-1, &pmap->stats.internal);
                                } else {
-                                       assert(pmap->stats.external > 0);
+                                       PMAP_STATS_ASSERTF(
+                                               (pmap->stats.external > 0,
+                                                "external %d",
+                                                pmap->stats.external));
                                        OSAddAtomic(-1, &pmap->stats.external);
                                }
+
+                               /* update ledgers */
+                               if (was_altacct) {
+                                       assert(IS_INTERNAL_PAGE(pai));
+                                       pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+                                       pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
+                               } else if (IS_REUSABLE_PAGE(pai)) {
+                                       assert(!was_altacct);
+                                       assert(IS_INTERNAL_PAGE(pai));
+                                       /* was already not in phys_footprint */
+                               } else if (IS_INTERNAL_PAGE(pai)) {
+                                       assert(!was_altacct);
+                                       assert(!IS_REUSABLE_PAGE(pai));
+                                       pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+                                       pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+                               } else {
+                                       /* not an internal page */
+                               }
                        }
                        if (iswired(*pte)) {
                                assert(pmap->stats.wired_count >= 1);
@@ -696,15 +831,12 @@ dont_update_pte:
                                pmap_ledger_debit(pmap, task_ledgers.wired_mem,
                                    PAGE_SIZE);
                        }
-                       pmap_phys_attributes[pai] |= oattr;
 
-                       /*
-                        *      Remove the mapping from the pvlist for
-                        *      this physical page.
-                        *      We'll end up with either a rooted pv or a
-                        *      hashed pv
-                        */
-                       pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte);
+                       if (!is_ept) {
+                               pmap_phys_attributes[pai] |= oattr;
+                       } else {
+                               pmap_phys_attributes[pai] |= ept_refmod_to_physmap(oattr);
+                       }
 
                } else {
 
@@ -750,7 +882,7 @@ dont_update_pte:
                        /*
                         *      No mappings yet, use rooted pv
                         */
-                       pv_h->va = vaddr;
+                       pv_h->va_and_flags = vaddr;
                        pv_h->pmap = pmap;
                        queue_init(&pv_h->qlink);
 
@@ -764,6 +896,14 @@ dont_update_pte:
                        } else {
                                pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
                        }
+                       if ((options & PMAP_OPTIONS_ALT_ACCT) &&
+                           IS_INTERNAL_PAGE(pai)) {
+                               pv_h->va_and_flags |= PVE_IS_ALTACCT;
+                               is_altacct = TRUE;
+                       } else {
+                               pv_h->va_and_flags &= ~PVE_IS_ALTACCT;
+                               is_altacct = FALSE;
+                       }
                } else {
                        /*
                         *      Add new pv_hashed_entry after header.
@@ -798,9 +938,17 @@ dont_update_pte:
                        if (PV_HASHED_ENTRY_NULL == pvh_e)
                                panic("Mapping alias chain exhaustion, possibly induced by numerous kernel virtual double mappings");
 
-                       pvh_e->va = vaddr;
+                       pvh_e->va_and_flags = vaddr;
                        pvh_e->pmap = pmap;
                        pvh_e->ppn = pn;
+                       if ((options & PMAP_OPTIONS_ALT_ACCT) &&
+                           IS_INTERNAL_PAGE(pai)) {
+                               pvh_e->va_and_flags |= PVE_IS_ALTACCT;
+                               is_altacct = TRUE;
+                       } else {
+                               pvh_e->va_and_flags &= ~PVE_IS_ALTACCT;
+                               is_altacct = FALSE;
+                       }
                        pv_hash_add(pvh_e, pv_h);
 
                        /*
@@ -814,12 +962,12 @@ dont_update_pte:
                 * for 'managed memory'
                 */
                pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
-               pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);               
                OSAddAtomic(+1,  &pmap->stats.resident_count);
                if (pmap->stats.resident_count > pmap->stats.resident_max) {
                        pmap->stats.resident_max = pmap->stats.resident_count;
                }
                if (pmap != kernel_pmap) {
+                       /* update pmap stats */
                        if (IS_REUSABLE_PAGE(pai)) {
                                OSAddAtomic(+1, &pmap->stats.reusable);
                                PMAP_STATS_PEAK(pmap->stats.reusable);
@@ -830,13 +978,33 @@ dont_update_pte:
                                OSAddAtomic(+1, &pmap->stats.external);
                                PMAP_STATS_PEAK(pmap->stats.external);
                        }
+
+                       /* update ledgers */
+                       if (is_altacct) {
+                               /* internal but also alternate accounting */
+                               assert(IS_INTERNAL_PAGE(pai));
+                               pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
+                               pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
+                               /* alternate accounting, so not in footprint */
+                       } else if (IS_REUSABLE_PAGE(pai)) {
+                               assert(!is_altacct);
+                               assert(IS_INTERNAL_PAGE(pai));
+                               /* internal but reusable: not in footprint */
+                       } else if (IS_INTERNAL_PAGE(pai)) {
+                               assert(!is_altacct);
+                               assert(!IS_REUSABLE_PAGE(pai));
+                               /* internal: add to footprint */
+                               pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
+                               pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+                       } else {
+                               /* not internal: not in footprint */
+                       }
                }
        } else if (last_managed_page == 0) {
                /* Account for early mappings created before "managed pages"
                 * are determined. Consider consulting the available DRAM map.
                 */
                pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
-               pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);               
                OSAddAtomic(+1,  &pmap->stats.resident_count);
                if (pmap != kernel_pmap) {
 #if 00
@@ -851,25 +1019,49 @@ dont_update_pte:
         *      Build a template to speed up entering -
         *      only the pfn changes.
         */
-       template = pa_to_pte(pa) | INTEL_PTE_VALID;
+       template = pa_to_pte(pa);
+
+       if (!is_ept) {
+               template |= INTEL_PTE_VALID;
+       } else {
+               template |= INTEL_EPT_IPTA;
+       }
+
+
        /*
         * DRK: It may be worth asserting on cache attribute flags that diverge
         * from the existing physical page attributes.
         */
 
-       template |= pmap_get_cache_attributes(pa_index(pa));
-       
-       if (flags & VM_MEM_NOT_CACHEABLE) {
+       template |= pmap_get_cache_attributes(pa_index(pa), is_ept);
+
+       /*
+        * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
+        */
+       if (!is_ept && (flags & VM_MEM_NOT_CACHEABLE)) {
                if (!(flags & VM_MEM_GUARDED))
                        template |= INTEL_PTE_PTA;
                template |= INTEL_PTE_NCACHE;
        }
-       if (pmap != kernel_pmap)
+       if (pmap != kernel_pmap && !is_ept)
                template |= INTEL_PTE_USER;
-       if (prot & VM_PROT_WRITE)
-               template |= INTEL_PTE_WRITE;
+       if (prot & VM_PROT_READ)
+               template |= PTE_READ(is_ept);
+       if (prot & VM_PROT_WRITE) {
+               template |= PTE_WRITE(is_ept);
+               if (is_ept && !pmap_ept_support_ad) {
+                       template |= PTE_MOD(is_ept);
+                       if (IS_MANAGED_PAGE(pai))
+                               pmap_phys_attributes[pai] |= PHYS_MODIFIED;
+               }
+       }
+       if (prot & VM_PROT_EXECUTE) {
+               assert(set_NX == 0);
+               template = pte_set_ex(template, is_ept);
+       }
+
        if (set_NX)
-               template |= INTEL_PTE_NX;
+               template = pte_remove_ex(template, is_ept);
        if (wired) {
                template |= INTEL_PTE_WIRED;
                OSAddAtomic(+1,  & pmap->stats.wired_count);
@@ -877,6 +1069,14 @@ dont_update_pte:
        }
        if (superpage)
                template |= INTEL_PTE_PS;
+
+       /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
+       if (is_ept && !pmap_ept_support_ad) {
+               template |= PTE_REF(is_ept);
+               if (IS_MANAGED_PAGE(pai))
+                       pmap_phys_attributes[pai] |= PHYS_REFERENCED;
+       }
+
        pmap_store_pte(pte, template);
 
        /*
@@ -909,8 +1109,8 @@ Done:
                m = vm_page_lookup(delpage_pm_obj, (delpage_pde_index * PAGE_SIZE));
                if (m == VM_PAGE_NULL)
                    panic("pmap_enter: pte page not in object");
-               vm_object_unlock(delpage_pm_obj);
                VM_PAGE_FREE(m);
+               vm_object_unlock(delpage_pm_obj);
                OSAddAtomic(-1,  &inuse_ptepages_count);
                PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);
        }
@@ -938,7 +1138,8 @@ pmap_remove_range(
        pt_entry_t              *spte,
        pt_entry_t              *epte)
 {
-       pmap_remove_range_options(pmap, start_vaddr, spte, epte, 0);
+       pmap_remove_range_options(pmap, start_vaddr, spte, epte,
+                                 PMAP_OPTIONS_REMOVE);
 }
 
 void
@@ -955,21 +1156,28 @@ pmap_remove_range_options(
        pv_hashed_entry_t       pvh_e;
        int                     pvh_cnt = 0;
        int                     num_removed, num_unwired, num_found, num_invalid;
-       int                     num_device, num_external, num_internal, num_reusable;
-       uint64_t                num_compressed;
+       int                     stats_external, stats_internal, stats_reusable;
+       uint64_t                stats_compressed;
+       int                     ledgers_internal, ledgers_alt_internal;
+       uint64_t                ledgers_compressed, ledgers_alt_compressed;
        ppnum_t                 pai;
        pmap_paddr_t            pa;
        vm_map_offset_t         vaddr;
+       boolean_t               is_ept = is_ept_pmap(pmap);
+       boolean_t               was_altacct;
 
        num_removed = 0;
        num_unwired = 0;
        num_found   = 0;
        num_invalid = 0;
-       num_device  = 0;
-       num_external = 0;
-       num_internal = 0;
-       num_reusable = 0;
-       num_compressed = 0;
+       stats_external = 0;
+       stats_internal = 0;
+       stats_reusable = 0;
+       stats_compressed = 0;
+       ledgers_internal = 0;
+       ledgers_compressed = 0;
+       ledgers_alt_internal = 0;
+       ledgers_alt_compressed = 0;
        /* invalidate the PTEs first to "freeze" them */
        for (cpte = spte, vaddr = start_vaddr;
             cpte < epte;
@@ -978,14 +1186,19 @@ pmap_remove_range_options(
 
                pa = pte_to_pa(p);
                if (pa == 0) {
-                       if (pmap != kernel_pmap &&
-                           (options & PMAP_OPTIONS_REMOVE) &&
-                           (p & INTEL_PTE_COMPRESSED)) {
-                               /* one less "compressed" */
-                               num_compressed++;
-                               /* clear marker */
+                       if ((options & PMAP_OPTIONS_REMOVE) &&
+                           (PTE_IS_COMPRESSED(p))) {
+                               assert(pmap != kernel_pmap);
+                               /* one less "compressed"... */
+                               stats_compressed++;
+                               ledgers_compressed++;
+                               if (p & PTE_COMPRESSED_ALT) {
+                                       /* ... but it used to be "ALTACCT" */
+                                       ledgers_alt_compressed++;
+                               }
+                               /* clear marker(s) */
                                /* XXX probably does not need to be atomic! */
-                               pmap_update_pte(cpte, INTEL_PTE_COMPRESSED, 0);
+                               pmap_update_pte(cpte, INTEL_PTE_COMPRESSED_MASK, 0);
                        }
                        continue;
                }
@@ -1002,15 +1215,14 @@ pmap_remove_range_options(
                         *      Just remove the mappings.
                         */
                        pmap_store_pte(cpte, 0);
-                       num_device++;
                        continue;
                }
 
-               if ((p & INTEL_PTE_VALID) == 0)
+               if ((p & PTE_VALID_MASK(is_ept)) == 0)
                        num_invalid++;
 
                /* invalidate the PTE */
-               pmap_update_pte(cpte, INTEL_PTE_VALID, 0);
+               pmap_update_pte(cpte, PTE_VALID_MASK(is_ept), 0);
        }
 
        if (num_found == 0) {
@@ -1027,8 +1239,27 @@ pmap_remove_range_options(
             cpte++, vaddr += PAGE_SIZE_64) {
 
                pa = pte_to_pa(*cpte);
-               if (pa == 0)
+               if (pa == 0) {
+               check_pte_for_compressed_marker:
+                       /*
+                        * This PTE could have been replaced with a
+                        * "compressed" marker after our first "freeze"
+                        * loop above, so check again.
+                        */
+                       if ((options & PMAP_OPTIONS_REMOVE) &&
+                           (PTE_IS_COMPRESSED(*cpte))) {
+                               assert(pmap != kernel_pmap);
+                               /* one less "compressed"... */
+                               stats_compressed++;
+                               ledgers_compressed++;
+                               if (*cpte & PTE_COMPRESSED_ALT) {
+                                       /* ... but it used to be "ALTACCT" */
+                                       ledgers_alt_compressed++;
+                               }
+                               pmap_store_pte(cpte, 0);
+                       }
                        continue;
+               }
 
                pai = pa_index(pa);
 
@@ -1037,15 +1268,40 @@ pmap_remove_range_options(
                pa = pte_to_pa(*cpte);
                if (pa == 0) {
                        UNLOCK_PVH(pai);
-                       continue;
+                       goto check_pte_for_compressed_marker;
                }
+
+               /*
+                * Remove the mapping from the pvlist for this physical page.
+                */
+               pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte, &was_altacct);
+
                num_removed++;
+               /* update pmap stats */
                if (IS_REUSABLE_PAGE(pai)) {
-                       num_reusable++;
+                       stats_reusable++;
+               } else if (IS_INTERNAL_PAGE(pai)) {
+                       stats_internal++;
+               } else {
+                       stats_external++;
+               }
+               /* update ledgers */
+               if (was_altacct) {
+                       /* internal and alternate accounting */
+                       assert(IS_INTERNAL_PAGE(pai));
+                       ledgers_internal++;
+                       ledgers_alt_internal++;
+               } else if (IS_REUSABLE_PAGE(pai)) {
+                       /* internal but reusable */
+                       assert(!was_altacct);
+                       assert(IS_INTERNAL_PAGE(pai));
                } else if (IS_INTERNAL_PAGE(pai)) {
-                       num_internal++;
+                       /* internal */
+                       assert(!was_altacct);
+                       assert(!IS_REUSABLE_PAGE(pai));
+                       ledgers_internal++;
                } else {
-                       num_external++;
+                       /* not internal */
                }
 
                /*
@@ -1053,13 +1309,13 @@ pmap_remove_range_options(
                 * nuke the entry in the page table
                 */
                /* remember reference and change */
-               pmap_phys_attributes[pai] |=
-                       (char) (*cpte & (PHYS_MODIFIED | PHYS_REFERENCED));
-
-               /*
-                * Remove the mapping from the pvlist for this physical page.
-                */
-               pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte);
+               if (!is_ept) {
+                       pmap_phys_attributes[pai] |=
+                               *cpte & (PHYS_MODIFIED | PHYS_REFERENCED);
+               } else {
+                       pmap_phys_attributes[pai] |=
+                               ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
+               }
 
                /* completely invalidate the PTE */
                pmap_store_pte(cpte, 0);
@@ -1089,35 +1345,72 @@ update_counts:
                panic("pmap_remove_range: resident_count");
 #endif
        pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed));
-       pmap_ledger_debit(pmap, task_ledgers.phys_footprint, machine_ptob(num_removed));        
-       assert(pmap->stats.resident_count >= num_removed);
+       PMAP_STATS_ASSERTF((pmap->stats.resident_count >= num_removed,
+                           "pmap=%p num_removed=%d stats.resident_count=%d",
+                           pmap, num_removed, pmap->stats.resident_count));
        OSAddAtomic(-num_removed,  &pmap->stats.resident_count);
 
        if (pmap != kernel_pmap) {
-#if 00
-               assert(pmap->stats.device >= num_device);
-               if (num_device)
-                       OSAddAtomic(-num_device, &pmap->stats.device);
-#endif /* 00 */
-               assert(pmap->stats.external >= num_external);
-               if (num_external)
-                       OSAddAtomic(-num_external, &pmap->stats.external);
-               assert(pmap->stats.internal >= num_internal);
-               if (num_internal)
-                       OSAddAtomic(-num_internal, &pmap->stats.internal);
-               assert(pmap->stats.reusable >= num_reusable);
-               if (num_reusable)
-                       OSAddAtomic(-num_reusable, &pmap->stats.reusable);
-               assert(pmap->stats.compressed >= num_compressed);
-               if (num_compressed)
-                       OSAddAtomic64(-num_compressed, &pmap->stats.compressed);
+               PMAP_STATS_ASSERTF((pmap->stats.external >= stats_external,
+                                   "pmap=%p stats_external=%d stats.external=%d",
+                                   pmap, stats_external, pmap->stats.external));
+               PMAP_STATS_ASSERTF((pmap->stats.internal >= stats_internal,
+                                   "pmap=%p stats_internal=%d stats.internal=%d",
+                                   pmap, stats_internal, pmap->stats.internal));
+               PMAP_STATS_ASSERTF((pmap->stats.reusable >= stats_reusable,
+                                   "pmap=%p stats_reusable=%d stats.reusable=%d",
+                                   pmap, stats_reusable, pmap->stats.reusable));
+               PMAP_STATS_ASSERTF((pmap->stats.compressed >= stats_compressed,
+                                   "pmap=%p stats_compressed=%lld, stats.compressed=%lld",
+                                   pmap, stats_compressed, pmap->stats.compressed));
+
+               /* update pmap stats */
+               if (stats_external) {
+                       OSAddAtomic(-stats_external, &pmap->stats.external);
+               }
+               if (stats_internal) {
+                       OSAddAtomic(-stats_internal, &pmap->stats.internal);
+               }
+               if (stats_reusable)
+                       OSAddAtomic(-stats_reusable, &pmap->stats.reusable);
+               if (stats_compressed)
+                       OSAddAtomic64(-stats_compressed, &pmap->stats.compressed);
+               /* update ledgers */
+               if (ledgers_internal) {
+                       pmap_ledger_debit(pmap,
+                                         task_ledgers.internal,
+                                         machine_ptob(ledgers_internal));
+               }
+               if (ledgers_compressed) {
+                       pmap_ledger_debit(pmap,
+                                         task_ledgers.internal_compressed,
+                                         machine_ptob(ledgers_compressed));
+               }
+               if (ledgers_alt_internal) {
+                       pmap_ledger_debit(pmap,
+                                         task_ledgers.alternate_accounting,
+                                         machine_ptob(ledgers_alt_internal));
+               }
+               if (ledgers_alt_compressed) {
+                       pmap_ledger_debit(pmap,
+                                         task_ledgers.alternate_accounting_compressed,
+                                         machine_ptob(ledgers_alt_compressed));
+               }
+               pmap_ledger_debit(pmap,
+                                 task_ledgers.phys_footprint,
+                                 machine_ptob((ledgers_internal -
+                                               ledgers_alt_internal) +
+                                              (ledgers_compressed -
+                                               ledgers_alt_compressed)));
        }
 
 #if TESTING
        if (pmap->stats.wired_count < num_unwired)
                panic("pmap_remove_range: wired_count");
 #endif
-       assert(pmap->stats.wired_count >= num_unwired);
+       PMAP_STATS_ASSERTF((pmap->stats.wired_count >= num_unwired,
+                           "pmap=%p num_unwired=%d stats.wired_count=%d",
+                           pmap, num_unwired, pmap->stats.wired_count));
        OSAddAtomic(-num_unwired,  &pmap->stats.wired_count);
        pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired));
 
@@ -1138,7 +1431,7 @@ pmap_remove(
        addr64_t        s64,
        addr64_t        e64)
 {
-       pmap_remove_options(map, s64, e64, 0);
+       pmap_remove_options(map, s64, e64, PMAP_OPTIONS_REMOVE);
 }
 
 void
@@ -1152,12 +1445,15 @@ pmap_remove_options(
        pt_entry_t     *spte, *epte;
        addr64_t        l64;
        uint64_t        deadline;
+       boolean_t       is_ept;
 
        pmap_intr_assert();
 
        if (map == PMAP_NULL || s64 == e64)
                return;
 
+       is_ept = is_ept_pmap(map);
+
        PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
                   map,
                   (uint32_t) (s64 >> 32), s64,
@@ -1206,8 +1502,8 @@ pmap_remove_options(
                        l64 = e64;
                pde = pmap_pde(map, s64);
 
-               if (pde && (*pde & INTEL_PTE_VALID)) {
-                       if (*pde & INTEL_PTE_PS) {
+               if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
+                       if (*pde & PTE_PS) {
                                /*
                                 * If we're removing a superpage, pmap_remove_range()
                                 * must work on level 2 instead of level 1; and we're
@@ -1228,6 +1524,14 @@ pmap_remove_options(
 
                if (s64 < e64 && rdtsc64() >= deadline) {
                        PMAP_UNLOCK(map)
+                           /* TODO: Rapid release/reacquisition can defeat
+                            * the "backoff" intent here; either consider a
+                            * fair spinlock, or a scheme whereby each lock
+                            * attempt marks the processor as within a spinlock
+                            * acquisition, and scan CPUs here to determine
+                            * if a backoff is necessary, to avoid sacrificing
+                            * performance in the common case.
+                            */
                        PMAP_LOCK(map)
                        deadline = rdtsc64() + max_preemption_latency_tsc;
                }
@@ -1274,6 +1578,7 @@ pmap_page_protect_options(
        pmap_t                  pmap;
        boolean_t               remove;
        pt_entry_t              new_pte_value;
+       boolean_t               is_ept;
 
        pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
@@ -1323,8 +1628,16 @@ pmap_page_protect_options(
        do {
                vm_map_offset_t vaddr;
 
+               if ((options & PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
+                   (pmap_phys_attributes[pai] & PHYS_MODIFIED)) {
+                       /* page was modified, so it will be compressed */
+                       options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+                       options |= PMAP_OPTIONS_COMPRESSOR;
+               }
+
                pmap = pv_e->pmap;
-               vaddr = pv_e->va;
+               is_ept = is_ept_pmap(pmap);
+               vaddr = PVE_VA(pv_e);
                pte = pmap_pte(pmap, vaddr);
 
                pmap_assert2((pa_index(pte_to_pa(*pte)) == pn),
@@ -1351,12 +1664,12 @@ pmap_page_protect_options(
                        if (pmap != kernel_pmap &&
                            (options & PMAP_OPTIONS_COMPRESSOR) &&
                            IS_INTERNAL_PAGE(pai)) {
-                               /* adjust "reclaimed" stats */
-                               OSAddAtomic64(+1, &pmap->stats.compressed);
-                               PMAP_STATS_PEAK(pmap->stats.compressed);
-                               pmap->stats.compressed_lifetime++;
-                               /* mark this PTE as having been "reclaimed" */
-                               new_pte_value = INTEL_PTE_COMPRESSED;
+                               assert(!PTE_IS_COMPRESSED(*pte));
+                               /* mark this PTE as having been "compressed" */
+                               new_pte_value = PTE_COMPRESSED;
+                               if (IS_ALTACCT_PAGE(pai, pv_e)) {
+                                       new_pte_value |= PTE_COMPRESSED_ALT;
+                               }
                        } else {
                                new_pte_value = 0;
                        }
@@ -1372,13 +1685,40 @@ pmap_page_protect_options(
                                /*
                                 * Remove the mapping, collecting dirty bits.
                                 */
-                               pmap_update_pte(pte, INTEL_PTE_VALID, 0);
+                               pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);
 
                                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
-                               pmap_phys_attributes[pai] |=
-                                       *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+                               if (!is_ept) {
+                                       pmap_phys_attributes[pai] |=
+                                               *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+                               } else {
+                                       pmap_phys_attributes[pai] |=
+                                               ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
+                               }
+                               if ((options &
+                                    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
+                                   IS_INTERNAL_PAGE(pai) &&
+                                   (pmap_phys_attributes[pai] &
+                                    PHYS_MODIFIED)) {
+                                       /*
+                                        * Page is actually "modified" and
+                                        * will be compressed.  Start
+                                        * accounting for it as "compressed".
+                                        */
+                                       assert(!(options & PMAP_OPTIONS_COMPRESSOR));
+                                       options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+                                       options |= PMAP_OPTIONS_COMPRESSOR;
+                                       assert(new_pte_value == 0);
+                                       if (pmap != kernel_pmap) {
+                                               new_pte_value = PTE_COMPRESSED;
+                                               if (IS_ALTACCT_PAGE(pai, pv_e)) {
+                                                       new_pte_value |= PTE_COMPRESSED_ALT;
+                                               }
+                                       }
+                               }
                                pmap_store_pte(pte, new_pte_value);
                        }
+
 #if TESTING
                        if (pmap->stats.resident_count < 1)
                                panic("pmap_page_protect: resident_count");
@@ -1386,17 +1726,15 @@ pmap_page_protect_options(
                        pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
                        assert(pmap->stats.resident_count >= 1);
                        OSAddAtomic(-1,  &pmap->stats.resident_count);
+
+                       /*
+                        * We only ever compress internal pages.
+                        */
                        if (options & PMAP_OPTIONS_COMPRESSOR) {
-                               /*
-                                * This removal is only being done so we can send this page to
-                                * the compressor; therefore it mustn't affect total task footprint.
-                                */
-                               pmap_ledger_credit(pmap, task_ledgers.phys_compressed, PAGE_SIZE);
-                       } else {
-                               pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+                               assert(IS_INTERNAL_PAGE(pai));
                        }
-
                        if (pmap != kernel_pmap) {
+                               /* update pmap stats */
                                if (IS_REUSABLE_PAGE(pai)) {
                                        assert(pmap->stats.reusable > 0);
                                        OSAddAtomic(-1, &pmap->stats.reusable);
@@ -1407,6 +1745,59 @@ pmap_page_protect_options(
                                        assert(pmap->stats.external > 0);
                                        OSAddAtomic(-1, &pmap->stats.external);
                                }
+                               if ((options & PMAP_OPTIONS_COMPRESSOR) &&
+                                   IS_INTERNAL_PAGE(pai)) {
+                                       /* adjust "compressed" stats */
+                                       OSAddAtomic64(+1, &pmap->stats.compressed);
+                                       PMAP_STATS_PEAK(pmap->stats.compressed);
+                                       pmap->stats.compressed_lifetime++;
+                               }
+
+                               /* update ledgers */
+                               if (IS_ALTACCT_PAGE(pai, pv_e)) {
+                                       assert(IS_INTERNAL_PAGE(pai));
+                                       pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+                                       pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
+                                       if (options & PMAP_OPTIONS_COMPRESSOR) {
+                                               pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
+                                               pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE);
+                                       }
+                               } else if (IS_REUSABLE_PAGE(pai)) {
+                                       assert(!IS_ALTACCT_PAGE(pai, pv_e));
+                                       assert(IS_INTERNAL_PAGE(pai));
+                                       if (options & PMAP_OPTIONS_COMPRESSOR) {
+                                               pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
+                                               /* was not in footprint, but is now */
+                                               pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+                                       }
+                               } else if (IS_INTERNAL_PAGE(pai)) {
+                                       assert(!IS_ALTACCT_PAGE(pai, pv_e));
+                                       assert(!IS_REUSABLE_PAGE(pai));
+                                       pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+                                       /*
+                                        * Update all stats related to physical
+                                        * footprint, which only deals with
+                                        * internal pages.
+                                        */
+                                       if (options & PMAP_OPTIONS_COMPRESSOR) {
+                                               /*
+                                                * This removal is only being
+                                                * done so we can send this page
+                                                * to the compressor;  therefore
+                                                * it mustn't affect total task
+                                                * footprint.
+                                                */
+                                               pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
+                                       } else {
+                                               /*
+                                                * This internal page isn't
+                                                * going to the compressor,
+                                                * so adjust stats to keep 
+                                                * phys_footprint up to date.
+                                                */
+                                               pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+                                       }
+                               }
                        }
 
                        /*
@@ -1434,9 +1825,14 @@ pmap_page_protect_options(
                        /*
                         * Write-protect, after opportunistic refmod collect
                         */
-                       pmap_phys_attributes[pai] |=
-                           *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                       pmap_update_pte(pte, INTEL_PTE_WRITE, 0);
+                       if (!is_ept) {
+                               pmap_phys_attributes[pai] |=
+                                       *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+                       } else {
+                               pmap_phys_attributes[pai] |=
+                                       ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
+                       }
+                       pmap_update_pte(pte, PTE_WRITE(is_ept), 0);
 
                        if (options & PMAP_OPTIONS_NOFLUSH)
                                PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
@@ -1456,7 +1852,7 @@ pmap_page_protect_options(
                if (pvh_e != (pv_hashed_entry_t) pv_h) {
                        pv_hash_remove(pvh_e);
                        pv_h->pmap = pvh_e->pmap;
-                       pv_h->va = pvh_e->va;
+                       pv_h->va_and_flags = pvh_e->va_and_flags;
                        pvh_e->qlink.next = (queue_entry_t) pvh_eh;
                        pvh_eh = pvh_e;
 
@@ -1492,7 +1888,23 @@ phys_attribute_clear(
        int                     pai;
        pmap_t                  pmap;
        char                    attributes = 0;
-       
+       boolean_t               is_internal, is_reusable, is_altacct, is_ept;
+       int                     ept_bits_to_clear;
+       boolean_t               ept_keep_global_mod = FALSE;
+
+       if ((bits & PHYS_MODIFIED) &&
+           (options & PMAP_OPTIONS_NOFLUSH) &&
+           arg == NULL) {
+               panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p): "
+                     "should not clear 'modified' without flushing TLBs\n",
+                     pn, bits, options, arg);
+       }
+
+       /* We only support converting MOD and REF bits for EPT PTEs in this function */
+       assert((bits & ~(PHYS_REFERENCED | PHYS_MODIFIED)) == 0);
+
+       ept_bits_to_clear = (unsigned)physmap_refmod_to_ept(bits & (PHYS_MODIFIED | PHYS_REFERENCED));
+
        pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
        if (pn == vm_page_guard_addr)
@@ -1514,6 +1926,7 @@ phys_attribute_clear(
 
        LOCK_PVH(pai);
 
+
        /*
         * Walk down PV list, clearing all modify or reference bits.
         * We do not have to lock the pv_list because we have
@@ -1524,31 +1937,128 @@ phys_attribute_clear(
                 * There are some mappings.
                 */
 
+               is_internal = IS_INTERNAL_PAGE(pai);
+               is_reusable = IS_REUSABLE_PAGE(pai);
+
                pv_e = (pv_hashed_entry_t)pv_h;
 
                do {
                        vm_map_offset_t va;
+                       char pte_bits;
 
                        pmap = pv_e->pmap;
-                       va = pv_e->va;
+                       is_ept = is_ept_pmap(pmap);
+                       is_altacct = IS_ALTACCT_PAGE(pai, pv_e);
+                       va = PVE_VA(pv_e);
+                       pte_bits = 0;
+
+                       if (bits) {
+                               pte = pmap_pte(pmap, va);
+                               /* grab ref/mod bits from this PTE */
+                               pte_bits = (*pte & (PTE_REF(is_ept) | PTE_MOD(is_ept)));
+                               /* propagate to page's global attributes */
+                               if (!is_ept) {
+                                       attributes |= pte_bits;
+                               } else {
+                                       attributes |= ept_refmod_to_physmap(pte_bits);
+                                       if (!pmap_ept_support_ad && (pte_bits & INTEL_EPT_MOD)) {
+                                               ept_keep_global_mod = TRUE;
+                                       }
+                               }
+                               /* which bits to clear for this PTE? */
+                               if (!is_ept) {
+                                       pte_bits &= bits;
+                               } else {
+                                       pte_bits &= ept_bits_to_clear;
+                               }
+                       }
 
                         /*
                          * Clear modify and/or reference bits.
                          */
-                       pte = pmap_pte(pmap, va);
-                       attributes |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                       pmap_update_pte(pte, bits, 0);
-                       /* Ensure all processors using this translation
-                        * invalidate this TLB entry. The invalidation *must*
-                        * follow the PTE update, to ensure that the TLB
-                        * shadow of the 'D' bit (in particular) is
-                        * synchronized with the updated PTE.
-                        */
-                       if (options & PMAP_OPTIONS_NOFLUSH) {
-                               if (arg)
-                                       PMAP_UPDATE_TLBS_DELAYED(pmap, va, va + PAGE_SIZE, (pmap_flush_context *)arg);
-                       } else
-                               PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+                       if (pte_bits) {
+                               pmap_update_pte(pte, pte_bits, 0);
+
+                               /* Ensure all processors using this translation
+                                * invalidate this TLB entry. The invalidation
+                                * *must* follow the PTE update, to ensure that
+                                * the TLB shadow of the 'D' bit (in particular)
+                                * is synchronized with the updated PTE.
+                                */
+                               if (! (options & PMAP_OPTIONS_NOFLUSH)) {
+                                       /* flush TLBS now */
+                                       PMAP_UPDATE_TLBS(pmap,
+                                                        va,
+                                                        va + PAGE_SIZE);
+                               } else if (arg) {
+                                       /* delayed TLB flush: add "pmap" info */
+                                       PMAP_UPDATE_TLBS_DELAYED(
+                                               pmap,
+                                               va,
+                                               va + PAGE_SIZE,
+                                               (pmap_flush_context *)arg);
+                               } else {
+                                       /* no TLB flushing at all */
+                               }
+                       }
+
+                       /* update pmap "reusable" stats */
+                       if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) &&
+                           is_reusable &&
+                           pmap != kernel_pmap) {
+                               /* one less "reusable" */
+                               assert(pmap->stats.reusable > 0);
+                               OSAddAtomic(-1, &pmap->stats.reusable);
+                               if (is_internal) {
+                                       /* one more "internal" */
+                                       OSAddAtomic(+1, &pmap->stats.internal);
+                                       PMAP_STATS_PEAK(pmap->stats.internal);
+                                       assert(pmap->stats.internal > 0);
+                                       if (is_altacct) {
+                                               /* no impact on ledgers */
+                                       } else {
+                                               pmap_ledger_credit(pmap,
+                                                                  task_ledgers.internal,
+                                                                  PAGE_SIZE);
+                                               pmap_ledger_credit(
+                                                       pmap,
+                                                       task_ledgers.phys_footprint,
+                                                       PAGE_SIZE);
+                                       }
+                               } else {
+                                       /* one more "external" */
+                                       OSAddAtomic(+1, &pmap->stats.external);
+                                       PMAP_STATS_PEAK(pmap->stats.external);
+                                       assert(pmap->stats.external > 0);
+                               }
+                       } else if ((options & PMAP_OPTIONS_SET_REUSABLE) &&
+                                  !is_reusable &&
+                                  pmap != kernel_pmap) {
+                               /* one more "reusable" */
+                               OSAddAtomic(+1, &pmap->stats.reusable);
+                               PMAP_STATS_PEAK(pmap->stats.reusable);
+                               assert(pmap->stats.reusable > 0);
+                               if (is_internal) {
+                                       /* one less "internal" */
+                                       assert(pmap->stats.internal > 0);
+                                       OSAddAtomic(-1, &pmap->stats.internal);
+                                       if (is_altacct) {
+                                               /* no impact on footprint */
+                                       } else {
+                                               pmap_ledger_debit(pmap,
+                                                                 task_ledgers.internal,
+                                                                 PAGE_SIZE);
+                                               pmap_ledger_debit(
+                                                       pmap,
+                                                       task_ledgers.phys_footprint,
+                                                       PAGE_SIZE);
+                                       }
+                               } else {
+                                       /* one less "external" */
+                                       assert(pmap->stats.external > 0);
+                                       OSAddAtomic(-1, &pmap->stats.external);
+                               }
+                       }
 
                        pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
 
@@ -1559,7 +2069,24 @@ phys_attribute_clear(
         */
 
        pmap_phys_attributes[pai] |= attributes;
-       pmap_phys_attributes[pai] &= (~bits);
+
+       if (ept_keep_global_mod) {
+               /*
+                * When the hardware lacks A/D bit support for EPT PTEs and the caller
+                * asks us to clear a phys page's modified bit, we may only do so if no
+                * EPT mapping of the page has its modified bit set; otherwise the
+                * global modified bit must be preserved.
+                */
+               bits &= ~PHYS_MODIFIED;
+       }
+       pmap_phys_attributes[pai] &= ~(bits);
+
+       /* update this page's "reusable" status */
+       if (options & PMAP_OPTIONS_CLEAR_REUSABLE) {
+               pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
+       } else if (options & PMAP_OPTIONS_SET_REUSABLE) {
+               pmap_phys_attributes[pai] |= PHYS_REUSABLE;
+       }
 
        UNLOCK_PVH(pai);
 
@@ -1581,9 +2108,11 @@ phys_attribute_test(
        int                     pai;
        pmap_t                  pmap;
        int                     attributes = 0;
+       boolean_t               is_ept;
 
        pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
+       assert((bits & ~(PHYS_MODIFIED | PHYS_REFERENCED)) == 0);
        if (pn == vm_page_guard_addr)
                return 0;
 
@@ -1627,13 +2156,19 @@ phys_attribute_test(
                        vm_map_offset_t va;
 
                        pmap = pv_e->pmap;
-                       va = pv_e->va;
+                       is_ept = is_ept_pmap(pmap);
+                       va = PVE_VA(pv_e);
                        /*
                         * pick up modify and/or reference bits from mapping
                         */
 
                        pte = pmap_pte(pmap, va);
-                       attributes |= (int)(*pte & bits);
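+                       /* EPT PTEs encode ref/mod differently; convert to PHYS_* bits before merging */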
+                       if (!is_ept) {
+                               attributes |= (int)(*pte & bits);
+                       } else {
+                               attributes |= (int)(ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED));
+
+                       }
 
                        pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
 
@@ -1664,7 +2199,8 @@ pmap_change_wiring(
        PMAP_LOCK(map);
 
        if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
-               panic("pmap_change_wiring: pte missing");
+               panic("pmap_change_wiring(%p,0x%llx,%d): pte missing",
+                     map, vaddr, wired);
 
        if (wired && !iswired(*pte)) {
                /*
@@ -1672,7 +2208,7 @@ pmap_change_wiring(
                 */
                pmap_ledger_credit(map, task_ledgers.wired_mem, PAGE_SIZE);
                OSAddAtomic(+1,  &map->stats.wired_count);
-               pmap_update_pte(pte, 0, INTEL_PTE_WIRED);
+               pmap_update_pte(pte, 0, PTE_WIRED);
        }
        else if (!wired && iswired(*pte)) {
                /*
@@ -1681,7 +2217,7 @@ pmap_change_wiring(
                assert(map->stats.wired_count >= 1);
                OSAddAtomic(-1,  &map->stats.wired_count);
                pmap_ledger_debit(map, task_ledgers.wired_mem, PAGE_SIZE);
-               pmap_update_pte(pte, INTEL_PTE_WIRED, 0);
+               pmap_update_pte(pte, PTE_WIRED, 0);
        }
 
        PMAP_UNLOCK(map);
@@ -1742,28 +2278,40 @@ pmap_map_bd(
        return(virt);
 }
 
-void
-pmap_reusable(
+mach_vm_size_t
+pmap_query_resident(
        pmap_t          pmap,
        addr64_t        s64,
        addr64_t        e64,
-       boolean_t       reusable)
+       mach_vm_size_t  *compressed_bytes_p)
 {
        pt_entry_t     *pde;
        pt_entry_t     *spte, *epte;
        addr64_t        l64;
        uint64_t        deadline;
+       mach_vm_size_t  resident_bytes;
+       mach_vm_size_t  compressed_bytes;
+       boolean_t       is_ept;
 
        pmap_intr_assert();
 
-       if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64)
-               return;
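+       /* Nothing to scan for a null or kernel pmap, or an empty range */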
+       if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64) {
+               if (compressed_bytes_p) {
+                       *compressed_bytes_p = 0;
+               }
+               return 0;
+       }
 
-       PMAP_TRACE(PMAP_CODE(PMAP__REUSABLE) | DBG_FUNC_START,
+       is_ept = is_ept_pmap(pmap);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START,
                   pmap,
                   (uint32_t) (s64 >> 32), s64,
                   (uint32_t) (e64 >> 32), e64);
 
+       resident_bytes = 0;
+       compressed_bytes = 0;
+
        PMAP_LOCK(pmap);
 
        deadline = rdtsc64() + max_preemption_latency_tsc;
@@ -1774,16 +2322,23 @@ pmap_reusable(
                        l64 = e64;
                pde = pmap_pde(pmap, s64);
 
-               if (pde && (*pde & INTEL_PTE_VALID)) {
-                       if (*pde & INTEL_PTE_PS) {
+               if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
+                       if (*pde & PTE_PS) {
                                /* superpage: not supported */
                        } else {
                                spte = pmap_pte(pmap,
                                                (s64 & ~(pde_mapped_size - 1)));
                                spte = &spte[ptenum(s64)];
                                epte = &spte[intel_btop(l64 - s64)];
-                               pmap_reusable_range(pmap, s64, spte, epte,
-                                                   reusable);
+
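+                               /*
+                                * Mapped frames count as resident; empty PTEs
+                                * carrying the compressor marker count as
+                                * compressed.
+                                */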
+                               for (; spte < epte; spte++) {
+                                       if (pte_to_pa(*spte) != 0) {
+                                               resident_bytes += PAGE_SIZE;
+                                       } else if (*spte & PTE_COMPRESSED) {
+                                               compressed_bytes += PAGE_SIZE;
+                                       }
+                               }
+
                        }
                }
                s64 = l64;
@@ -1797,171 +2352,90 @@ pmap_reusable(
 
        PMAP_UNLOCK(pmap);
 
-       PMAP_TRACE(PMAP_CODE(PMAP__REUSABLE) | DBG_FUNC_END,
-                  pmap, reusable, 0, 0, 0);
-}
-
-void
-pmap_reusable_range(
-       pmap_t                  pmap,
-       vm_map_offset_t         start_vaddr,
-       pt_entry_t              *spte,
-       pt_entry_t              *epte,
-       boolean_t               reusable)
-{
-       pt_entry_t              *cpte;
-       int                     num_external, num_internal, num_reusable;
-       ppnum_t                 pai;
-       pmap_paddr_t            pa;
-       vm_map_offset_t         vaddr;
-
-       num_external = 0;
-       num_internal = 0;
-       num_reusable = 0;
-
-       for (cpte = spte, vaddr = start_vaddr;
-            cpte < epte;
-            cpte++, vaddr += PAGE_SIZE_64) {
-
-               pa = pte_to_pa(*cpte);
-               if (pa == 0)
-                       continue;
-
-               pai = pa_index(pa);
-
-               LOCK_PVH(pai);
-
-               pa = pte_to_pa(*cpte);
-               if (pa == 0) {
-                       UNLOCK_PVH(pai);
-                       continue;
-               }
-               if (reusable) {
-                       /* we want to set "reusable" */
-                       if (IS_REUSABLE_PAGE(pai)) {
-                               /* already reusable: no change */
-                       } else {
-                               pmap_phys_attributes[pai] |= PHYS_REUSABLE;
-                               /* one more "reusable" */
-                               num_reusable++;
-                               if (IS_INTERNAL_PAGE(pai)) {
-                                       /* one less "internal" */
-                                       num_internal--;
-                               } else {
-                                       /* one less "external" */
-                                       num_external--;
-                               }
-                       }
-               } else {
-                       /* we want to clear "reusable" */
-                       if (IS_REUSABLE_PAGE(pai)) {
-                               pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
-                               /* one less "reusable" */
-                               num_reusable--;
-                               if (IS_INTERNAL_PAGE(pai)) {
-                                       /* one more "internal" */
-                                       num_internal++;
-                               } else {
-                                       /* one more "external" */
-                                       num_external++;
-                               }
-                       } else {
-                               /* already not reusable: no change */
-                       }
-               }
-
-               UNLOCK_PVH(pai);
-
-       } /* for loop */
+       PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END,
+                  pmap, 0, 0, 0, 0);
 
-       /*
-        *      Update the counts
-        */
-       if (pmap != kernel_pmap) {
-               if (num_external) {
-                       OSAddAtomic(num_external, &pmap->stats.external);
-                       PMAP_STATS_PEAK(pmap->stats.external);
-               }
-               assert(pmap->stats.external >= 0);
-               if (num_internal) {
-                       OSAddAtomic(num_internal, &pmap->stats.internal);
-                       PMAP_STATS_PEAK(pmap->stats.internal);
-               }
-               assert(pmap->stats.internal >= 0);
-               if (num_reusable) {
-                       OSAddAtomic(num_reusable, &pmap->stats.reusable);
-                       PMAP_STATS_PEAK(pmap->stats.reusable);
-               }
-               assert(pmap->stats.reusable >= 0);
+       if (compressed_bytes_p) {
+               *compressed_bytes_p = compressed_bytes;
        }
-
-       return;
+       return resident_bytes;
 }
 
-unsigned int
-pmap_query_resident(
+kern_return_t
+pmap_query_page_info(
        pmap_t          pmap,
-       addr64_t        s64,
-       addr64_t        e64)
+       vm_map_offset_t va,
+       int             *disp_p)
 {
-       pt_entry_t     *pde;
-       pt_entry_t     *spte, *epte;
-       addr64_t        l64;
-       uint64_t        deadline;
-       unsigned int    result;
+       int             disp;
+       boolean_t       is_ept;
+       pmap_paddr_t    pa;
+       ppnum_t         pai;
+       pd_entry_t      *pde;
+       pt_entry_t      *pte;
 
        pmap_intr_assert();
+       if (pmap == PMAP_NULL || pmap == kernel_pmap) {
+               *disp_p = 0;
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64)
-               return 0;
-
-       PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START,
-                  pmap,
-                  (uint32_t) (s64 >> 32), s64,
-                  (uint32_t) (e64 >> 32), e64);
-
-       result = 0;
+       disp = 0;
+       is_ept = is_ept_pmap(pmap);
 
        PMAP_LOCK(pmap);
 
-       deadline = rdtsc64() + max_preemption_latency_tsc;
-
-       while (s64 < e64) {
-               l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);
-               if (l64 > e64)
-                       l64 = e64;
-               pde = pmap_pde(pmap, s64);
-
-               if (pde && (*pde & INTEL_PTE_VALID)) {
-                       if (*pde & INTEL_PTE_PS) {
-                               /* superpage: not supported */
-                       } else {
-                               spte = pmap_pte(pmap,
-                                               (s64 & ~(pde_mapped_size - 1)));
-                               spte = &spte[ptenum(s64)];
-                               epte = &spte[intel_btop(l64 - s64)];
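+       /* Bail out if the PDE is missing, invalid, or maps a superpage (not handled here) */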
+       pde = pmap_pde(pmap, va);
+       if (!pde ||
+           !(*pde & PTE_VALID_MASK(is_ept)) ||
+           (*pde & PTE_PS)) {
+               goto done;
+       }
 
-                               for (; spte < epte; spte++) {
-                                       if (pte_to_pa(*spte) != 0) {
-                                               result++;
-                                       }
-                               }
+       pte = pmap_pte(pmap, va);
+       if (pte == PT_ENTRY_NULL) {
+               goto done;
+       }
 
+       pa = pte_to_pa(*pte);
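+       /* A PTE with no frame may still hold the compressor marker for this page */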
+       if (pa == 0) {
+               if (PTE_IS_COMPRESSED(*pte)) {
+                       disp |= PMAP_QUERY_PAGE_COMPRESSED;
+                       if (*pte & PTE_COMPRESSED_ALT) {
+                               disp |= PMAP_QUERY_PAGE_COMPRESSED_ALTACCT;
                        }
                }
-               s64 = l64;
-
-               if (s64 < e64 && rdtsc64() >= deadline) {
-                       PMAP_UNLOCK(pmap);
-                       PMAP_LOCK(pmap);
-                       deadline = rdtsc64() + max_preemption_latency_tsc;
+       } else {
+               disp |= PMAP_QUERY_PAGE_PRESENT;
+               pai = pa_index(pa);
+               if (!IS_MANAGED_PAGE(pai)) {
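+                       /* not a managed page: no further classification */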
+               } else if (pmap_pv_is_altacct(pmap, va, pai)) {
+                       assert(IS_INTERNAL_PAGE(pai));
+                       disp |= PMAP_QUERY_PAGE_INTERNAL;
+                       disp |= PMAP_QUERY_PAGE_ALTACCT;
+               } else if (IS_REUSABLE_PAGE(pai)) {
+                       disp |= PMAP_QUERY_PAGE_REUSABLE;
+               } else if (IS_INTERNAL_PAGE(pai)) {
+                       disp |= PMAP_QUERY_PAGE_INTERNAL;
                }
        }
 
+done:
        PMAP_UNLOCK(pmap);
+       *disp_p = disp;
+       return KERN_SUCCESS;
+}
 
-       PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END,
-                  pmap, 0, 0, 0, 0);
+#if DEBUG || DEVELOPMENT
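+/* DEBUG/DEVELOPMENT-only helpers exposing the kernel pmap lock */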
+void
+kernel_pmap_lock(void)
+{
+       PMAP_LOCK(kernel_pmap);
+}
 
-       return result;
+void
+kernel_pmap_unlock(void)
+{
+       PMAP_UNLOCK(kernel_pmap);
 }
+#endif /* DEBUG || DEVELOPMENT */