diff --git a/osfmk/i386/pmap_x86_common.c b/osfmk/i386/pmap_x86_common.c
index 53c1996e1eb227a6ec5e11305eb93cfd5c92bdbc..f400bc280a62a2d1d565f882d9d6a0e6d0f94622 100644
--- a/osfmk/i386/pmap_x86_common.c
+++ b/osfmk/i386/pmap_x86_common.c
  */
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
+#include <kern/ledger.h>
 #include <i386/pmap_internal.h>
 
-
 void           pmap_remove_range(
                        pmap_t          pmap,
                        vm_map_offset_t va,
                        pt_entry_t      *spte,
                        pt_entry_t      *epte);
 
-pv_rooted_entry_t      pv_head_table;          /* array of entries, one per
-                                                * page */
-thread_call_t          mapping_adjust_call;
-static thread_call_data_t mapping_adjust_call_data;
-uint32_t               mappingrecurse = 0;
-
-pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[PMAP_PAGETABLE_CORRUPTION_MAX_LOG];
-uint32_t pmap_pagetable_corruption_incidents;
-uint64_t pmap_pagetable_corruption_last_abstime = (~(0ULL) >> 1);
-uint64_t pmap_pagetable_corruption_interval_abstime;
-thread_call_t  pmap_pagetable_corruption_log_call;
-static thread_call_data_t      pmap_pagetable_corruption_log_call_data;
-boolean_t pmap_pagetable_corruption_timeout = FALSE;
+uint32_t pmap_update_clear_pte_count;
 
 /*
  * The Intel platform can nest at the PDE level, so NBPDE (i.e. 2MB) at a time,
@@ -103,8 +91,8 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t
                panic("pmap_nest: va_start(0x%llx) != nstart(0x%llx)\n", va_start, nstart);
 
        PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
-           (int) grand, (int) subord,
-           (int) (va_start>>32), (int) va_start, 0);
+       (uintptr_t) grand, (uintptr_t) subord,
+           (uintptr_t) (va_start>>32), (uintptr_t) va_start, 0);
 
        nvaddr = (vm_map_offset_t)nstart;
        num_pde = size >> PDESHIFT;
@@ -120,7 +108,7 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t
 
                        while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
                                PMAP_UNLOCK(subord);
-                               pmap_expand_pdpt(subord, nvaddr);
+                               pmap_expand_pdpt(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE);
                                PMAP_LOCK(subord);
                                npde = pmap64_pdpt(subord, nvaddr);
                        }
@@ -133,7 +121,7 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t
 
                        while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
                                PMAP_UNLOCK(subord);
-                               pmap_expand(subord, nvaddr);
+                               pmap_expand(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE);
                                PMAP_LOCK(subord);
                                npde = pmap_pde(subord, nvaddr);
                        }
@@ -159,7 +147,7 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t
                        pde = pmap64_pdpt(grand, vaddr);
                        if (0 == pde) {
                                PMAP_UNLOCK(grand);
-                               pmap_expand_pml4(grand, vaddr);
+                               pmap_expand_pml4(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
                                PMAP_LOCK(grand);
                                pde = pmap64_pdpt(grand, vaddr);
                        }
@@ -178,7 +166,7 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t
                        pde = pmap_pde(grand, vaddr);
                        if ((0 == pde) && cpu_64bit) {
                                PMAP_UNLOCK(grand);
-                               pmap_expand_pdpt(grand, vaddr);
+                               pmap_expand_pdpt(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
                                PMAP_LOCK(grand);
                                pde = pmap_pde(grand, vaddr);
                        }
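
A hedged illustration of the nesting interface exercised above: the pmap names, base address and size below are made up, but the constraints (va_start == nstart, NBPDE/pmap_nesting_size_min alignment) follow the checks in this function and in pmap_unnest() below.

	/* Illustrative only -- not part of this change. */
	kern_return_t kr;
	addr64_t nest_base = 0x7fff70000000ULL;		/* hypothetical, NBPDE-aligned */
	uint64_t nest_size = 512ULL << 20;		/* hypothetical, multiple of NBPDE */

	kr = pmap_nest(task_pmap, shared_region_pmap, nest_base, nest_base, nest_size);
	if (kr != KERN_SUCCESS)
		panic("pmap_nest() failed, kr=%d", kr);
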
@@ -216,8 +204,8 @@ kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
        uint64_t npdpt = PMAP_INVALID_PDPTNUM;
 
        PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START,
-           (int) grand, 
-           (int) (vaddr>>32), (int) vaddr, 0, 0);
+           (uintptr_t) grand, 
+           (uintptr_t) (vaddr>>32), (uintptr_t) vaddr, 0, 0);
 
        if ((size & (pmap_nesting_size_min-1)) ||
            (vaddr & (pmap_nesting_size_min-1))) {
@@ -337,6 +325,67 @@ pfp_exit:
         return ppn;
 }
 
+/*
+ * Update cache attributes for all extant managed mappings.
+ * Assumes the PV lock for this page is held, and that the
+ * page is managed.
+ */
+
+void
+pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
+       pv_rooted_entry_t       pv_h, pv_e;
+       pv_hashed_entry_t       pvh_e, nexth;
+       vm_map_offset_t vaddr;
+       pmap_t  pmap;
+       pt_entry_t      *ptep;
+       
+       assert(IS_MANAGED_PAGE(pn));
+
+       pv_h = pai_to_pvh(pn);
+       /* TODO: translate the PHYS_* bits to PTE bits; while they're
+        * currently identical, they may not remain so.
+        * Potential optimizations (here and in pmap_page_protect):
+        * parallel shootdowns and checks for redundant
+        * attribute modifications.
+        */
+       
+       /*
+        * Alter attributes on all mappings
+        */
+       if (pv_h->pmap != PMAP_NULL) {
+               pv_e = pv_h;
+               pvh_e = (pv_hashed_entry_t)pv_e;
+
+               do {
+                       pmap = pv_e->pmap;
+                       vaddr = pv_e->va;
+                       ptep = pmap_pte(pmap, vaddr);
+               
+                       if (0 == ptep)
+                               panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);
+
+                       nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);
+                       pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);
+                       PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+                       pvh_e = nexth;
+               } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
+       }
+}
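
A hedged sketch of the calling convention this routine assumes: the caller holds the PV lock for the page and passes PTE-format cache bits (the PHYS_ and PTE cache bits are currently identical, per the TODO above). The attribute chosen below is illustrative.

	/* Illustrative only: make every existing mapping of page "pn" uncacheable. */
	int pai = ppn_to_pai(pn);

	LOCK_PVH(pai);
	pmap_update_cache_attributes_locked(pn, INTEL_PTE_NCACHE);
	UNLOCK_PVH(pai);
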
+
+void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) {
+       assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+       if (dofilter) {
+               CPU_CR3_MARK_INACTIVE();
+       } else {
+               CPU_CR3_MARK_ACTIVE();
+               __asm__ volatile("mfence");
+               if (current_cpu_datap()->cpu_tlb_invalid)
+                       process_pmap_updates();
+       }
+}
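
A hedged usage sketch: a caller already running with interrupts (or preemption) disabled, as the assert requires, can suppress TLB-shootdown servicing around a critical window and then drain anything it missed on the way out.

	/* Illustrative only */
	x86_filter_TLB_coherency_interrupts(TRUE);	/* CPU_CR3_MARK_INACTIVE */
	/* ... work that must not service TLB shootdown IPIs ... */
	x86_filter_TLB_coherency_interrupts(FALSE);	/* re-arm; drains cpu_tlb_invalid */
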
+
+
 /*
  *     Insert the given physical page (p) at
  *     the specified virtual address (v) in the
@@ -349,18 +398,34 @@ pfp_exit:
  *     or lose information.  That is, this routine must actually
  *     insert this page into the given map NOW.
  */
+
 void
 pmap_enter(
        register pmap_t         pmap,
        vm_map_offset_t         vaddr,
        ppnum_t                 pn,
        vm_prot_t               prot,
+       vm_prot_t               fault_type,
        unsigned int            flags,
        boolean_t               wired)
+{
+       (void) pmap_enter_options(pmap, vaddr, pn, prot, fault_type, flags, wired, PMAP_EXPAND_OPTIONS_NONE);
+}
+
+kern_return_t
+pmap_enter_options(
+       register pmap_t         pmap,
+       vm_map_offset_t         vaddr,
+       ppnum_t                 pn,
+       vm_prot_t               prot,
+       __unused vm_prot_t      fault_type,
+       unsigned int            flags,
+       boolean_t               wired,
+       unsigned int            options)
 {
        pt_entry_t              *pte;
        pv_rooted_entry_t       pv_h;
-       int                     pai;
+       ppnum_t                 pai;
        pv_hashed_entry_t       pvh_e;
        pv_hashed_entry_t       pvh_new;
        pt_entry_t              template;
@@ -375,25 +440,35 @@ pmap_enter(
        vm_object_t             delpage_pm_obj = NULL;
        int                     delpage_pde_index = 0;
        pt_entry_t              old_pte;
+       kern_return_t           kr_expand;
 
        pmap_intr_assert();
-       assert(pn != vm_page_fictitious_addr);
 
        if (pmap == PMAP_NULL)
-               return;
+               return KERN_INVALID_ARGUMENT;
+
+       /* N.B. We can be supplied a zero page frame in the NOENTER case; the
+        * value is unused in that scenario.
+        */
+       assert(pn != vm_page_fictitious_addr);
+
        if (pn == vm_page_guard_addr)
-               return;
+               return KERN_INVALID_ARGUMENT;
 
        PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
-                  pmap,
-                  (uint32_t) (vaddr >> 32), (uint32_t) vaddr,
-                  pn, prot);
+           pmap,
+           (uint32_t) (vaddr >> 32), (uint32_t) vaddr,
+           pn, prot);
 
        if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
                set_NX = FALSE;
        else
                set_NX = TRUE;
 
+       if (__improbable(set_NX && (pmap == kernel_pmap) && ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) || (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))) {
+               set_NX = FALSE;
+       }
+
        /*
         *      Must allocate a new pvlist entry while we're unlocked;
         *      zalloc may cause pageout (which will lock the pmap system).
@@ -417,7 +492,9 @@ Retry:
                while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
                        /* need room for another pde entry */
                        PMAP_UNLOCK(pmap);
-                       pmap_expand_pdpt(pmap, vaddr);
+                       kr_expand = pmap_expand_pdpt(pmap, vaddr, options);
+                       if (kr_expand != KERN_SUCCESS)
+                               return kr_expand;
                        PMAP_LOCK(pmap);
                }
        } else {
@@ -427,10 +504,16 @@ Retry:
                         * going to grow pde level page(s)
                         */
                        PMAP_UNLOCK(pmap);
-                       pmap_expand(pmap, vaddr);
+                       kr_expand = pmap_expand(pmap, vaddr, options);
+                       if (kr_expand != KERN_SUCCESS)
+                               return kr_expand;
                        PMAP_LOCK(pmap);
                }
        }
+       if (options & PMAP_EXPAND_OPTIONS_NOENTER) {
+               PMAP_UNLOCK(pmap);
+               return KERN_SUCCESS;
+       }
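
A hedged sketch of the new PMAP_EXPAND_OPTIONS_NOENTER path: grow the page-table hierarchy covering a VA without installing a mapping. Per the N.B. above, the page frame argument is ignored here, so 0 is acceptable; the caller and vaddr are illustrative.

	/* Illustrative only: pre-populate page tables for vaddr, enter nothing. */
	kern_return_t kr;

	kr = pmap_enter_options(kernel_pmap, vaddr, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_NONE, 0, FALSE, PMAP_EXPAND_OPTIONS_NOENTER);
	assert(kr == KERN_SUCCESS);
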
 
        if (superpage && *pte && !(*pte & INTEL_PTE_PS)) {
                /*
@@ -444,7 +527,6 @@ Retry:
                *pte = 0;
        }
 
-
        old_pa = pte_to_pa(*pte);
        pai = pa_index(old_pa);
        old_pa_locked = FALSE;
@@ -469,12 +551,15 @@ Retry:
         *      at this address.
         */
        if (old_pa == pa) {
+               pt_entry_t old_attributes =
+                   *pte & ~(INTEL_PTE_REF | INTEL_PTE_MOD);
 
                /*
                 *      May be changing its wired attribute or protection
                 */
 
                template = pa_to_pte(pa) | INTEL_PTE_VALID;
+               template |= pmap_get_cache_attributes(pa_index(pa));
 
                if (VM_MEM_NOT_CACHEABLE ==
                    (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
@@ -492,27 +577,33 @@ Retry:
 
                if (wired) {
                        template |= INTEL_PTE_WIRED;
-                       if (!iswired(*pte))
-                               OSAddAtomic(+1,
-                                       &pmap->stats.wired_count);
+                       if (!iswired(old_attributes))  {
+                               OSAddAtomic(+1, &pmap->stats.wired_count);
+                               pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
+                       }
                } else {
-                       if (iswired(*pte)) {
+                       if (iswired(old_attributes)) {
                                assert(pmap->stats.wired_count >= 1);
-                               OSAddAtomic(-1,
-                                       &pmap->stats.wired_count);
+                               OSAddAtomic(-1, &pmap->stats.wired_count);
+                               pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
                        }
                }
                if (superpage)          /* this path can not be used */
                        template |= INTEL_PTE_PS;       /* to change the page size! */
+               /* Determine delta, PV locked */
+               need_tlbflush =
+                   ((old_attributes ^ template) != INTEL_PTE_WIRED);
 
                /* store modified PTE and preserve RC bits */
-               pmap_update_pte(pte, *pte,
-                       template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
+               pt_entry_t npte, opte;
+               do {
+                       opte = *pte;
+                       npte = template | (opte & (INTEL_PTE_REF | INTEL_PTE_MOD));
+               } while (!pmap_cmpx_pte(pte, opte, npte));
                if (old_pa_locked) {
                        UNLOCK_PVH(pai);
                        old_pa_locked = FALSE;
                }
-               need_tlbflush = TRUE;
                goto Done;
        }
 
@@ -538,7 +629,7 @@ Retry:
                 */
 
                /* invalidate the PTE */
-               pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
+               pmap_update_pte(pte, INTEL_PTE_VALID, 0);
                /* propagate invalidate everywhere */
                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
                /* remember reference and change */
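
The call sites above and below assume pmap_update_pte() now takes (pte, bits_to_clear, bits_to_set) masks rather than old/new PTE images. A minimal sketch of that contract, built on the pmap_cmpx_pte() compare-and-exchange helper used earlier in this function (the _sketch suffix marks this as illustrative, not the shipped implementation):

	static inline void
	pmap_update_pte_sketch(pt_entry_t *ptep, pt_entry_t clear, pt_entry_t set)
	{
		pt_entry_t opte, npte;

		do {
			opte = *ptep;
			npte = (opte & ~clear) | set;	/* e.g. (INTEL_PTE_VALID, 0) clears the valid bit */
		} while (!pmap_cmpx_pte(ptep, opte, npte));
	}
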
@@ -548,22 +639,15 @@ Retry:
                pmap_store_pte(pte, 0);
 
                if (IS_MANAGED_PAGE(pai)) {
-#if TESTING
-                       if (pmap->stats.resident_count < 1)
-                               panic("pmap_enter: resident_count");
-#endif
+                       pmap_assert(old_pa_locked == TRUE);
+                       pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
                        assert(pmap->stats.resident_count >= 1);
-                       OSAddAtomic(-1,
-                               &pmap->stats.resident_count);
-
+                       OSAddAtomic(-1, &pmap->stats.resident_count);
                        if (iswired(*pte)) {
-#if TESTING
-                               if (pmap->stats.wired_count < 1)
-                                       panic("pmap_enter: wired_count");
-#endif
                                assert(pmap->stats.wired_count >= 1);
-                               OSAddAtomic(-1,
-                                       &pmap->stats.wired_count);
+                               OSAddAtomic(-1, &pmap->stats.wired_count);
+                               pmap_ledger_debit(pmap, task_ledgers.wired_mem,
+                                   PAGE_SIZE);
                        }
                        pmap_phys_attributes[pai] |= oattr;
 
@@ -584,8 +668,8 @@ Retry:
 
                        if (iswired(*pte)) {
                                assert(pmap->stats.wired_count >= 1);
-                               OSAddAtomic(-1,
-                                       &pmap->stats.wired_count);
+                               OSAddAtomic(-1, &pmap->stats.wired_count);
+                               pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
                        }
                }
        }
@@ -624,7 +708,7 @@ Retry:
                                pvh_e = pvh_new;
                                pvh_new = PV_HASHED_ENTRY_NULL;
                        } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
-                               PV_HASHED_ALLOC(pvh_e);
+                               PV_HASHED_ALLOC(&pvh_e);
                                if (PV_HASHED_ENTRY_NULL == pvh_e) {
                                        /*
                                         * the pv list is empty. if we are on
@@ -636,10 +720,11 @@ Retry:
                                         * us.
                                         */
                                        if (kernel_pmap == pmap) {
-                                               PV_HASHED_KERN_ALLOC(pvh_e);
+                                               PV_HASHED_KERN_ALLOC(&pvh_e);
                                        } else {
                                                UNLOCK_PVH(pai);
                                                PMAP_UNLOCK(pmap);
+                                               pmap_pv_throttle(pmap);
                                                pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
                                                goto Retry;
                                        }
@@ -664,10 +749,17 @@ Retry:
                 * only count the mapping
                 * for 'managed memory'
                 */
-               OSAddAtomic(+1,  & pmap->stats.resident_count);
+               pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
+               OSAddAtomic(+1,  &pmap->stats.resident_count);
                if (pmap->stats.resident_count > pmap->stats.resident_max) {
                        pmap->stats.resident_max = pmap->stats.resident_count;
                }
+       } else if (last_managed_page == 0) {
+               /* Account for early mappings created before "managed pages"
+                * are determined. Consider consulting the available DRAM map.
+                */
+               pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
+               OSAddAtomic(+1,  &pmap->stats.resident_count);
        }
        /*
         * Step 3) Enter the mapping.
@@ -676,7 +768,13 @@ Retry:
         *      only the pfn changes.
         */
        template = pa_to_pte(pa) | INTEL_PTE_VALID;
+       /*
+        * DRK: It may be worth asserting on cache attribute flags that diverge
+        * from the existing physical page attributes.
+        */
 
+       template |= pmap_get_cache_attributes(pa_index(pa));
+       
        if (flags & VM_MEM_NOT_CACHEABLE) {
                if (!(flags & VM_MEM_GUARDED))
                        template |= INTEL_PTE_PTA;
@@ -691,6 +789,7 @@ Retry:
        if (wired) {
                template |= INTEL_PTE_WIRED;
                OSAddAtomic(+1,  & pmap->stats.wired_count);
+               pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
        }
        if (superpage)
                template |= INTEL_PTE_PS;
@@ -723,12 +822,14 @@ Done:
                m = vm_page_lookup(delpage_pm_obj, delpage_pde_index);
                if (m == VM_PAGE_NULL)
                    panic("pmap_enter: pte page not in object");
+               vm_object_unlock(delpage_pm_obj);
                VM_PAGE_FREE(m);
                OSAddAtomic(-1,  &inuse_ptepages_count);
-               vm_object_unlock(delpage_pm_obj);
+               PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);
        }
 
        PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+       return KERN_SUCCESS;
 }
 
 /*
@@ -756,7 +857,7 @@ pmap_remove_range(
        pv_hashed_entry_t       pvh_e;
        int                     pvh_cnt = 0;
        int                     num_removed, num_unwired, num_found, num_invalid;
-       int                     pai;
+       ppnum_t                 pai;
        pmap_paddr_t            pa;
        vm_map_offset_t         vaddr;
 
@@ -805,8 +906,8 @@ pmap_remove_range(
                if ((p & INTEL_PTE_VALID) == 0)
                        num_invalid++;
 
-               /* invalidate the PTE */ 
-               pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
+               /* invalidate the PTE */
+               pmap_update_pte(cpte, INTEL_PTE_VALID, 0);
        }
 
        if (num_found == 0) {
@@ -877,6 +978,7 @@ update_counts:
        if (pmap->stats.resident_count < num_removed)
                panic("pmap_remove_range: resident_count");
 #endif
+       pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed));
        assert(pmap->stats.resident_count >= num_removed);
        OSAddAtomic(-num_removed,  &pmap->stats.resident_count);
 
@@ -886,6 +988,7 @@ update_counts:
 #endif
        assert(pmap->stats.wired_count >= num_unwired);
        OSAddAtomic(-num_unwired,  &pmap->stats.wired_count);
+       pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired));
 
        return;
 }
@@ -1071,10 +1174,9 @@ pmap_page_protect(
                vaddr = pv_e->va;
                pte = pmap_pte(pmap, vaddr);
 
-#if    DEBUG
-               if (pa_index(pte_to_pa(*pte)) != pn)
-                       panic("pmap_page_protect: PTE mismatch, pn: 0x%x, pmap: %p, vaddr: 0x%llx, pte: 0x%llx", pn, pmap, vaddr, *pte);
-#endif
+               pmap_assert2((pa_index(pte_to_pa(*pte)) == pn),
+                   "pmap_page_protect: PTE mismatch, pn: 0x%x, pmap: %p, vaddr: 0x%llx, pte: 0x%llx", pn, pmap, vaddr, *pte);
+
                if (0 == pte) {
                        panic("pmap_page_protect() "
                                "pmap=%p pn=0x%x vaddr=0x%llx\n",
@@ -1084,25 +1186,31 @@ pmap_page_protect(
 
                /*
                 * Remove the mapping if new protection is NONE
-                * or if write-protecting a kernel mapping.
                 */
-               if (remove || pmap == kernel_pmap) {
+               if (remove) {
                        /*
                         * Remove the mapping, collecting dirty bits.
                         */
-                       pmap_update_pte(pte, *pte, *pte & ~INTEL_PTE_VALID);
+                       pmap_update_pte(pte, INTEL_PTE_VALID, 0);
+
+                       /* Remove per-pmap wired count */
+                       if (iswired(*pte)) {
+                               OSAddAtomic(-1, &pmap->stats.wired_count);
+                               pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
+                       }
+
                        PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
                        pmap_phys_attributes[pai] |=
-                               *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+                           *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
                        pmap_store_pte(pte, 0);
 
 #if TESTING
                        if (pmap->stats.resident_count < 1)
                                panic("pmap_page_protect: resident_count");
 #endif
+                       pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
                        assert(pmap->stats.resident_count >= 1);
                        OSAddAtomic(-1,  &pmap->stats.resident_count);
-
                        /*
                         * Deal with the pv_rooted_entry.
                         */
@@ -1126,9 +1234,11 @@ pmap_page_protect(
                        }
                } else {
                        /*
-                        * Write-protect.
+                        * Write-protect, after opportunistic refmod collect
                         */
-                       pmap_update_pte(pte, *pte, *pte & ~INTEL_PTE_WRITE);
+                       pmap_phys_attributes[pai] |=
+                           *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+                       pmap_update_pte(pte, INTEL_PTE_WRITE, 0);
                        PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
                }
                pvh_e = nexth;
@@ -1163,116 +1273,262 @@ done:
                   0, 0, 0, 0, 0);
 }
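
For orientation, the remove/write-protect split above corresponds to the two usual ways this routine is invoked (illustrative calls only; "pn" is a managed physical page number):

	pmap_page_protect(pn, VM_PROT_READ);	/* write-protect every mapping of pn */
	pmap_page_protect(pn, VM_PROT_NONE);	/* remove every mapping of pn */
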
 
-__private_extern__ void
-pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1,2)) {
-       if (pmap_pagetable_corruption_incidents > 0) {
-               int i, e = MIN(pmap_pagetable_corruption_incidents, PMAP_PAGETABLE_CORRUPTION_MAX_LOG);
-               (*log_func)("%u pagetable corruption incident(s) detected, timeout: %u\n", pmap_pagetable_corruption_incidents, pmap_pagetable_corruption_timeout);
-               for (i = 0; i < e; i++) {
-                       (*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n", pmap_pagetable_corruption_records[i].incident,  pmap_pagetable_corruption_records[i].reason, pmap_pagetable_corruption_records[i].action, pmap_pagetable_corruption_records[i].abstime);
-               }
+/*
+ *     Clear specified attribute bits.
+ */
+void
+phys_attribute_clear(
+       ppnum_t         pn,
+       int             bits)
+{
+       pv_rooted_entry_t       pv_h;
+       pv_hashed_entry_t       pv_e;
+       pt_entry_t              *pte;
+       int                     pai;
+       pmap_t                  pmap;
+       char                    attributes = 0;
+       
+       pmap_intr_assert();
+       assert(pn != vm_page_fictitious_addr);
+       if (pn == vm_page_guard_addr)
+               return;
+
+       pai = ppn_to_pai(pn);
+
+       if (!IS_MANAGED_PAGE(pai)) {
+               /*
+                *      Not a managed page.
+                */
+               return;
+       }
+
+       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START,
+                  pn, bits, 0, 0, 0);
+
+       pv_h = pai_to_pvh(pai);
+
+       LOCK_PVH(pai);
+
+       /*
+        * Walk down PV list, clearing all modify or reference bits.
+        * We do not have to lock the pv_list because we have
+        * the per-pmap lock
+        */
+       if (pv_h->pmap != PMAP_NULL) {
+               /*
+                * There are some mappings.
+                */
+
+               pv_e = (pv_hashed_entry_t)pv_h;
+
+               do {
+                       vm_map_offset_t va;
+
+                       pmap = pv_e->pmap;
+                       va = pv_e->va;
+
+                        /*
+                         * Clear modify and/or reference bits.
+                         */
+                       pte = pmap_pte(pmap, va);
+                       attributes |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+                       pmap_update_pte(pte, bits, 0);
+                       /* Ensure all processors using this translation
+                        * invalidate this TLB entry. The invalidation *must*
+                        * follow the PTE update, to ensure that the TLB
+                        * shadow of the 'D' bit (in particular) is
+                        * synchronized with the updated PTE.
+                        */
+                       PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+
+                       pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
+
+               } while (pv_e != (pv_hashed_entry_t)pv_h);
        }
+       /* Opportunistic refmod collection, annulled
+        * if both REF and MOD are being cleared.
+        */
+
+       pmap_phys_attributes[pai] |= attributes;
+       pmap_phys_attributes[pai] &= (~bits);
+
+       UNLOCK_PVH(pai);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
 }
 
-void
-mapping_free_prime(void)
+/*
+ *     Check specified attribute bits.
+ */
+int
+phys_attribute_test(
+       ppnum_t         pn,
+       int             bits)
 {
-       int                     i;
-       pv_hashed_entry_t       pvh_e;
-       pv_hashed_entry_t       pvh_eh;
-       pv_hashed_entry_t       pvh_et;
-       int                     pv_cnt;
+       pv_rooted_entry_t       pv_h;
+       pv_hashed_entry_t       pv_e;
+       pt_entry_t              *pte;
+       int                     pai;
+       pmap_t                  pmap;
+       int                     attributes = 0;
 
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK); i++) {
-               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+       pmap_intr_assert();
+       assert(pn != vm_page_fictitious_addr);
+       if (pn == vm_page_guard_addr)
+               return 0;
 
-               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-               pvh_eh = pvh_e;
+       pai = ppn_to_pai(pn);
 
-               if (pvh_et == PV_HASHED_ENTRY_NULL)
-                       pvh_et = pvh_e;
-               pv_cnt++;
+       if (!IS_MANAGED_PAGE(pai)) {
+               /*
+                *      Not a managed page.
+                */
+               return 0;
        }
-       PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
 
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
-               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+       /*
+        * Fast check...  if bits already collected
+        * no need to take any locks...
+        * if not set, we need to recheck after taking
+        * the lock in case they got pulled in while
+        * we were waiting for the lock
+        */
+       if ((pmap_phys_attributes[pai] & bits) == bits)
+               return bits;
 
-               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-               pvh_eh = pvh_e;
+       pv_h = pai_to_pvh(pai);
 
-               if (pvh_et == PV_HASHED_ENTRY_NULL)
-                       pvh_et = pvh_e;
-               pv_cnt++;
-       }
-       PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+       LOCK_PVH(pai);
+
+       attributes = pmap_phys_attributes[pai] & bits;
 
-}
 
-static inline void
-pmap_pagetable_corruption_log_setup(void) {
-       if (pmap_pagetable_corruption_log_call == NULL) {
-               nanotime_to_absolutetime(PMAP_PAGETABLE_CORRUPTION_INTERVAL, 0, &pmap_pagetable_corruption_interval_abstime);
-               thread_call_setup(&pmap_pagetable_corruption_log_call_data,
-                   (thread_call_func_t) pmap_pagetable_corruption_msg_log,
-                   (thread_call_param_t) &printf);
-               pmap_pagetable_corruption_log_call = &pmap_pagetable_corruption_log_call_data;
+       /*
+        * Walk down PV list, checking the mappings until we
+        * reach the end or we've found the desired attributes.
+        */
+       if (attributes != bits &&
+           pv_h->pmap != PMAP_NULL) {
+               /*
+                * There are some mappings.
+                */
+               pv_e = (pv_hashed_entry_t)pv_h;
+               do {
+                       vm_map_offset_t va;
+
+                       pmap = pv_e->pmap;
+                       va = pv_e->va;
+                       /*
+                        * pick up modify and/or reference bits from mapping
+                        */
+
+                       pte = pmap_pte(pmap, va);
+                       attributes |= (int)(*pte & bits);
+
+                       pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
+
+               } while ((attributes != bits) &&
+                        (pv_e != (pv_hashed_entry_t)pv_h));
        }
+       pmap_phys_attributes[pai] |= attributes;
+
+       UNLOCK_PVH(pai);
+       return (attributes);
 }
 
+/*
+ *     Routine:        pmap_change_wiring
+ *     Function:       Change the wiring attribute for a map/virtual-address
+ *                     pair.
+ *     In/out conditions:
+ *                     The mapping must already exist in the pmap.
+ */
 void
-mapping_adjust(void)
+pmap_change_wiring(
+       pmap_t          map,
+       vm_map_offset_t vaddr,
+       boolean_t       wired)
 {
-       pv_hashed_entry_t       pvh_e;
-       pv_hashed_entry_t       pvh_eh;
-       pv_hashed_entry_t       pvh_et;
-       int                     pv_cnt;
-       int                     i;
-
-       if (mapping_adjust_call == NULL) {
-               thread_call_setup(&mapping_adjust_call_data,
-                                 (thread_call_func_t) mapping_adjust,
-                                 (thread_call_param_t) NULL);
-               mapping_adjust_call = &mapping_adjust_call_data;
-       }
+       pt_entry_t      *pte;
 
-       pmap_pagetable_corruption_log_setup();
+       PMAP_LOCK(map);
 
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK) {
-               for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
-                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+       if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
+               panic("pmap_change_wiring: pte missing");
 
-                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-                       pvh_eh = pvh_e;
+       if (wired && !iswired(*pte)) {
+               /*
+                * wiring down mapping
+                */
+               pmap_ledger_credit(map, task_ledgers.wired_mem, PAGE_SIZE);
+               OSAddAtomic(+1,  &map->stats.wired_count);
+               pmap_update_pte(pte, 0, INTEL_PTE_WIRED);
+       }
+       else if (!wired && iswired(*pte)) {
+               /*
+                * unwiring mapping
+                */
+               assert(map->stats.wired_count >= 1);
+               OSAddAtomic(-1,  &map->stats.wired_count);
+               pmap_ledger_debit(map, task_ledgers.wired_mem, PAGE_SIZE);
+               pmap_update_pte(pte, INTEL_PTE_WIRED, 0);
+       }
 
-                       if (pvh_et == PV_HASHED_ENTRY_NULL)
-                               pvh_et = pvh_e;
-                       pv_cnt++;
-               }
-               PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+       PMAP_UNLOCK(map);
+}
+
+/*
+ *     "Backdoor" direct map routine for early mappings.
+ *     Useful for mapping memory outside the range of pages managed by
+ *     the pmap layer (e.g., device registers).
+ *     Sets A, D and NC if requested.
+ */
+
+vm_offset_t
+pmap_map_bd(
+       vm_offset_t     virt,
+       vm_map_offset_t start_addr,
+       vm_map_offset_t end_addr,
+       vm_prot_t       prot,
+       unsigned int    flags)
+{
+       pt_entry_t      template;
+       pt_entry_t      *pte;
+       spl_t           spl;
+       vm_offset_t     base = virt;
+       template = pa_to_pte(start_addr)
+               | INTEL_PTE_REF
+               | INTEL_PTE_MOD
+               | INTEL_PTE_WIRED
+               | INTEL_PTE_VALID;
+
+       if ((flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) == VM_MEM_NOT_CACHEABLE) {
+               template |= INTEL_PTE_NCACHE;
+               if (!(flags & (VM_MEM_GUARDED)))
+                       template |= INTEL_PTE_PTA;
        }
 
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK) {
-               for (i = 0; i < PV_HASHED_ALLOC_CHUNK; i++) {
-                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+#if    defined(__x86_64__)
+       if ((prot & VM_PROT_EXECUTE) == 0)
+               template |= INTEL_PTE_NX;
+#endif
 
-                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-                       pvh_eh = pvh_e;
+       if (prot & VM_PROT_WRITE)
+               template |= INTEL_PTE_WRITE;
 
-                       if (pvh_et == PV_HASHED_ENTRY_NULL)
-                               pvh_et = pvh_e;
-                       pv_cnt++;
+       while (start_addr < end_addr) {
+               spl = splhigh();
+               pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
+               if (pte == PT_ENTRY_NULL) {
+                       panic("pmap_map_bd: Invalid kernel address\n");
                }
-               PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+               pmap_store_pte(pte, template);
+               splx(spl);
+               pte_increment_pa(template);
+               virt += PAGE_SIZE;
+               start_addr += PAGE_SIZE;
        }
-       mappingrecurse = 0;
+       flush_tlb_raw();
+       PMAP_UPDATE_TLBS(kernel_pmap, base, base + end_addr - start_addr);
+       return(virt);
 }
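
A hedged usage sketch for this backdoor mapper: "io_window_va" and the physical range are illustrative. Per the template construction above, VM_MEM_NOT_CACHEABLE together with VM_MEM_GUARDED yields a strongly uncached mapping (no PTA bit), and the routine returns the virtual address just past the last page mapped.

	/* Illustrative only: map one device register page uncached. */
	vm_offset_t next_va;

	next_va = pmap_map_bd(io_window_va, 0xfee00000ULL, 0xfee01000ULL,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED);
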
-