-
-/*
- * Insert the given physical page (p) at
- * the specified virtual address (v) in the
- * target physical map with the protection requested.
- *
- * If specified, the page will be wired down, meaning
- * that the related pte cannot be reclaimed.
- *
- * NB: This is the only routine which MAY NOT lazy-evaluate
- * or lose information. That is, this routine must actually
- * insert this page into the given map NOW.
- */
-void
-pmap_enter(
- register pmap_t pmap,
- vm_map_offset_t vaddr,
- ppnum_t pn,
- vm_prot_t prot,
- unsigned int flags,
- boolean_t wired)
-{
- pt_entry_t *pte;
- pv_rooted_entry_t pv_h;
- int pai;
- pv_hashed_entry_t pvh_e;
- pv_hashed_entry_t pvh_new;
- pt_entry_t template;
- pmap_paddr_t old_pa;
- pmap_paddr_t pa = (pmap_paddr_t) i386_ptob(pn);
- boolean_t need_tlbflush = FALSE;
- boolean_t set_NX;
- char oattr;
- boolean_t old_pa_locked;
- boolean_t superpage = flags & VM_MEM_SUPERPAGE;
- vm_object_t delpage_pm_obj = NULL;
- int delpage_pde_index = 0;
-
-
- pmap_intr_assert();
- assert(pn != vm_page_fictitious_addr);
- if (pmap_debug)
- kprintf("pmap_enter(%p,%llu,%u)\n", pmap, vaddr, pn);
- if (pmap == PMAP_NULL)
- return;
- if (pn == vm_page_guard_addr)
- return;
-
- PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
- pmap,
- (uint32_t) (vaddr >> 32), (uint32_t) vaddr,
- pn, prot);
-
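- /*
- * Decide whether to mark the mapping no-execute: set NX only when
- * execute permission is not requested and NX is enabled both
- * globally and for this pmap.
- */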
- if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
- set_NX = FALSE;
- else
- set_NX = TRUE;
-
- /*
- * Must allocate a new pvlist entry while we're unlocked;
- * zalloc may cause pageout (which will lock the pmap system).
- * If we determine we need a pvlist entry, we will unlock
- * and allocate one. Then we will retry, throwing away
- * the allocated entry later (if we no longer need it).
- */
-
- pvh_new = PV_HASHED_ENTRY_NULL;
-Retry:
- pvh_e = PV_HASHED_ENTRY_NULL;
-
- PMAP_LOCK(pmap);
-
- /*
- * Expand pmap to include this pte. Assume that
- * pmap is always expanded to include enough hardware
- * pages to map one VM page.
- */
- if (superpage) {
- while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
- /* need room for another pde entry */
- PMAP_UNLOCK(pmap);
- pmap_expand_pdpt(pmap, vaddr);
- PMAP_LOCK(pmap);
- }
- } else {
- while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
- /*
- * Must unlock to expand the pmap;
- * we are going to grow pde-level page(s).
- */
- PMAP_UNLOCK(pmap);
- pmap_expand(pmap, vaddr);
- PMAP_LOCK(pmap);
- }
- }
-
- if (superpage && *pte && !(*pte & INTEL_PTE_PS)) {
- /*
- * There is still an empty page table mapped that
- * was used for a previous base page mapping.
- * Remember the PDE and the PDE index, so that we
- * can free the page at the end of this function.
- */
- delpage_pde_index = (int)pdeidx(pmap, vaddr);
- delpage_pm_obj = pmap->pm_obj;
- *pte = 0;
- }
-
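- /*
- * Read the physical address currently mapped at vaddr (0 if the
- * slot is empty) and derive its physical page index, which is used
- * to look up the pv list and the physical attribute array.
- */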
- old_pa = pte_to_pa(*pte);
- pai = pa_index(old_pa);
- old_pa_locked = FALSE;
-
- /*
- * If we have a previous managed page, lock the pv entry now.
- * After taking the lock, re-check the PTE; if another path
- * removed the mapping first, drop the lock again.
- */
- if ((0 != old_pa) && IS_MANAGED_PAGE(pai)) {
- LOCK_PVH(pai);
- old_pa_locked = TRUE;
- old_pa = pte_to_pa(*pte);
- if (0 == old_pa) {
- UNLOCK_PVH(pai); /* another path beat us to it */
- old_pa_locked = FALSE;
- }
- }
-
- /*
- * Special case if the incoming physical page is already mapped
- * at this address.
- */
- if (old_pa == pa) {
-
- /*
- * May be changing its wired attribute or protection
- */
-
- template = pa_to_pte(pa) | INTEL_PTE_VALID;
-
- if (VM_MEM_NOT_CACHEABLE ==
- (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
- if (!(flags & VM_MEM_GUARDED))
- template |= INTEL_PTE_PTA;
- template |= INTEL_PTE_NCACHE;
- }
- if (pmap != kernel_pmap)
- template |= INTEL_PTE_USER;
- if (prot & VM_PROT_WRITE)
- template |= INTEL_PTE_WRITE;
-
- if (set_NX)
- template |= INTEL_PTE_NX;
-
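- /*
- * Adjust the wired count only when the wiring state of this
- * mapping actually changes.
- */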
- if (wired) {
- template |= INTEL_PTE_WIRED;
- if (!iswired(*pte))
- OSAddAtomic(+1,
- &pmap->stats.wired_count);
- } else {
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- OSAddAtomic(-1,
- &pmap->stats.wired_count);
- }
- }
- if (superpage) /* this path cannot be used */
- template |= INTEL_PTE_PS; /* to change the page size! */
-
- /* store modified PTE and preserve RC bits */
- pmap_update_pte(pte, *pte,
- template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
- if (old_pa_locked) {
- UNLOCK_PVH(pai);
- old_pa_locked = FALSE;
- }
- need_tlbflush = TRUE;
- goto Done;
- }
-
- /*
- * Outline of code from here:
- * 1) If va was mapped, update TLBs, remove the mapping
- * and remove old pvlist entry.
- * 2) Add pvlist entry for new mapping
- * 3) Enter new mapping.
- *
- * If the old physical page is not managed step 1) is skipped
- * (except for updating the TLBs), and the mapping is
- * overwritten at step 3). If the new physical page is not
- * managed, step 2) is skipped.
- */
-
- if (old_pa != (pmap_paddr_t) 0) {
-
- /*
- * Don't do anything to pages outside valid memory here.
- * Instead convince the code that enters a new mapping
- * to overwrite the old one.
- */
-
- /* invalidate the PTE */
- pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
- /* propagate invalidate everywhere */
- PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
- /* remember reference and change */
- oattr = (char) (*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
- /* completely invalidate the PTE */
- pmap_store_pte(pte, 0);
-
- if (IS_MANAGED_PAGE(pai)) {
-#if TESTING
- if (pmap->stats.resident_count < 1)
- panic("pmap_enter: resident_count");
-#endif
- assert(pmap->stats.resident_count >= 1);
- OSAddAtomic(-1,
- &pmap->stats.resident_count);
-
- if (iswired(*pte)) {
-#if TESTING
- if (pmap->stats.wired_count < 1)
- panic("pmap_enter: wired_count");
-#endif
- assert(pmap->stats.wired_count >= 1);
- OSAddAtomic(-1,
- &pmap->stats.wired_count);
- }
- pmap_phys_attributes[pai] |= oattr;
-
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- * We'll end up with either a rooted pv or a
- * hashed pv
- */
- pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t) pai);
-
- } else {
-
- /*
- * old_pa is not managed.
- * Do removal part of accounting.
- */
-
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- OSAddAtomic(-1,
- &pmap->stats.wired_count);
- }
- }
- }
-
- /*
- * If we had a previously managed page locked, unlock it now.
- */
- if (old_pa_locked) {
- UNLOCK_PVH(pai);
- old_pa_locked = FALSE;
- }
-
- pai = pa_index(pa); /* now working with new incoming phys page */
- if (IS_MANAGED_PAGE(pai)) {
-
- /*
- * Step 2) Enter the mapping in the PV list for this
- * physical page.
- */
- pv_h = pai_to_pvh(pai);
-
- LOCK_PVH(pai);
-
- if (pv_h->pmap == PMAP_NULL) {
- /*
- * No mappings yet, use rooted pv
- */
- pv_h->va = vaddr;
- pv_h->pmap = pmap;
- queue_init(&pv_h->qlink);
- } else {
- /*
- * Add new pv_hashed_entry after header.
- */
- if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
- pvh_e = pvh_new;
- pvh_new = PV_HASHED_ENTRY_NULL;
- } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
- PV_HASHED_ALLOC(pvh_e);
- if (PV_HASHED_ENTRY_NULL == pvh_e) {
- /*
- * The pv free list is empty. If we are on
- * the kernel pmap we'll use one of the
- * special private kernel pv_e's; otherwise
- * we need to unlock everything, zalloc a
- * pv_e, and restart, bringing the pv_e in
- * with us.
- */
- if (kernel_pmap == pmap) {
- PV_HASHED_KERN_ALLOC(pvh_e);
- } else {
- UNLOCK_PVH(pai);
- PMAP_UNLOCK(pmap);
- pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
- goto Retry;
- }
- }
- }
- if (PV_HASHED_ENTRY_NULL == pvh_e)
- panic("pvh_e exhaustion");
-
- pvh_e->va = vaddr;
- pvh_e->pmap = pmap;
- pvh_e->ppn = pn;
- pv_hash_add(pvh_e, pv_h);
-
- /*
- * Remember that we used the pvlist entry.
- */
- pvh_e = PV_HASHED_ENTRY_NULL;
- }
-
- /*
- * only count the mapping
- * for 'managed memory'
- */
- OSAddAtomic(+1, & pmap->stats.resident_count);
- if (pmap->stats.resident_count > pmap->stats.resident_max) {
- pmap->stats.resident_max = pmap->stats.resident_count;
- }
- }
- /*
- * Step 3) Enter the mapping.
- *
- * Build a template to speed up entering -
- * only the pfn changes.
- */
- template = pa_to_pte(pa) | INTEL_PTE_VALID;
-
- if (flags & VM_MEM_NOT_CACHEABLE) {
- if (!(flags & VM_MEM_GUARDED))
- template |= INTEL_PTE_PTA;
- template |= INTEL_PTE_NCACHE;
- }
- if (pmap != kernel_pmap)
- template |= INTEL_PTE_USER;
- if (prot & VM_PROT_WRITE)
- template |= INTEL_PTE_WRITE;
- if (set_NX)
- template |= INTEL_PTE_NX;
- if (wired) {
- template |= INTEL_PTE_WIRED;
- OSAddAtomic(+1, & pmap->stats.wired_count);
- }
- if (superpage)
- template |= INTEL_PTE_PS;
- pmap_store_pte(pte, template);
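- /*
- * The new translation is now visible to the hardware; any
- * previously valid mapping at this address was invalidated and
- * its TLB entries flushed above.
- */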
-
- /*
- * if this was a managed page we delayed unlocking the pv until here
- * to prevent pmap_page_protect et al from finding it until the pte
- * has been stored
- */
- if (IS_MANAGED_PAGE(pai)) {
- UNLOCK_PVH(pai);
- }
-Done:
- if (need_tlbflush == TRUE)
- PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
-
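- /*
- * Free any pv entry that was allocated, or recovered from the old
- * mapping, but not consumed by the new one.
- */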
- if (pvh_e != PV_HASHED_ENTRY_NULL) {
- PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
- }
- if (pvh_new != PV_HASHED_ENTRY_NULL) {
- PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);
- }
- PMAP_UNLOCK(pmap);
-
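- /*
- * If a superpage mapping displaced an empty page-table page
- * (remembered above), return that page to the VM now that the
- * pmap lock has been dropped.
- */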
- if (delpage_pm_obj) {
- vm_page_t m;
-
- vm_object_lock(delpage_pm_obj);
- m = vm_page_lookup(delpage_pm_obj, delpage_pde_index);
- if (m == VM_PAGE_NULL)
- panic("pmap_enter: pte page not in object");
- VM_PAGE_FREE(m);
- OSAddAtomic(-1, &inuse_ptepages_count);
- vm_object_unlock(delpage_pm_obj);
- }
-
- PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
-}
-