- if(VM_MEM_NOT_CACHEABLE == (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
- if(!(flags & VM_MEM_GUARDED))
- template |= INTEL_PTE_PTA;
- template |= INTEL_PTE_NCACHE;
- }
-
- if (pmap != kernel_pmap)
- template |= INTEL_PTE_USER;
- if (prot & VM_PROT_WRITE)
- template |= INTEL_PTE_WRITE;
-
- if (set_NX == TRUE)
- template |= INTEL_PTE_NX;
-
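- /*
- * Keep the pmap's wired-page count in sync with the
- * requested wiring state.
- */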
- if (wired) {
- template |= INTEL_PTE_WIRED;
- if (!iswired(*pte))
- pmap->stats.wired_count++;
- }
- else {
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- pmap->stats.wired_count--;
- }
- }
-
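- /*
- * Preserve the modified bit from the existing PTE.
- */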
- if (*pte & INTEL_PTE_MOD)
- template |= INTEL_PTE_MOD;
-
- pmap_store_pte(pte, template);
- pte++;
-
- need_tlbflush = TRUE;
- goto Done;
- }
-
- /*
- * Outline of code from here:
- * 1) If va was mapped, update TLBs, remove the mapping
- * and remove old pvlist entry.
- * 2) Add pvlist entry for new mapping
- * 3) Enter new mapping.
- *
- * SHARING FAULTS IS HORRIBLY BROKEN
- * SHARING_FAULTS complicates this slightly in that it cannot
- * replace the mapping, but must remove it (because adding the
- * pvlist entry for the new mapping may remove others), and
- * hence always enters the new mapping at step 3)
- *
- * If the old physical page is not managed step 1) is skipped
- * (except for updating the TLBs), and the mapping is
- * overwritten at step 3). If the new physical page is not
- * managed, step 2) is skipped.
- */
-
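- /*
- * Step 1) Remove the old mapping, if any, and do the
- * removal side of the accounting.
- */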
- if (old_pa != (pmap_paddr_t) 0) {
-
- /*
- * Don't do anything to pages outside valid memory here.
- * Instead convince the code that enters a new mapping
- * to overwrite the old one.
- */
-
- if (valid_page(i386_btop(old_pa))) {
-
- pai = pa_index(old_pa);
- LOCK_PVH(pai);
-
- assert(pmap->stats.resident_count >= 1);
- pmap->stats.resident_count--;
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- pmap->stats.wired_count--;
- }
-
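- /*
- * Fold the referenced/modified bits of the old PTE
- * into the physical attribute array before the PTE
- * is cleared.
- */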
- pmap_phys_attributes[pai] |=
- *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-
- pmap_store_pte(pte, 0);
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- */
- {
- register pv_entry_t prev, cur;
-
- pv_h = pai_to_pvh(pai);
- if (pv_h->pmap == PMAP_NULL) {
- panic("pmap_enter: null pv_list!");
- }
-
- if (pv_h->va == vaddr && pv_h->pmap == pmap) {
- /*
- * The header is the entry being removed.  Copy the
- * next entry into the header and free that entry
- * (the header itself cannot be freed).
- */
- cur = pv_h->next;
- if (cur != PV_ENTRY_NULL) {
- *pv_h = *cur;
- pv_e = cur;
- }
- else {
- pv_h->pmap = PMAP_NULL;
- }
- }
- else {
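- /*
- * Walk the chain to find the entry for this
- * (pmap, va) pair and unlink it.
- */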
- cur = pv_h;
- do {
- prev = cur;
- if ((cur = prev->next) == PV_ENTRY_NULL) {
- panic("pmap_enter: mapping not in pv_list!");
- }
- } while (cur->va != vaddr || cur->pmap != pmap);
- prev->next = cur->next;
- pv_e = cur;
- }
- }
- UNLOCK_PVH(pai);
- }
- else {
-
- /*
- * old_pa is not managed. Pretend it's zero so code
- * at Step 3) will enter new mapping (overwriting old
- * one). Do removal part of accounting.
- */
- old_pa = (pmap_paddr_t) 0;
-
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- pmap->stats.wired_count--;
- }
- }
- need_tlbflush = TRUE;
-
- }
-
- if (valid_page(i386_btop(pa))) {
-
- /*
- * Step 2) Enter the mapping in the PV list for this
- * physical page.
- */
-
- pai = pa_index(pa);
-
-
-#if SHARING_FAULTS /* this is horribly broken, do not enable */
-RetryPvList:
- /*
- * We can return here from the sharing fault code below
- * in case we removed the only entry on the pv list and thus
- * must enter the new one in the list header.
- */
-#endif /* SHARING_FAULTS */
- LOCK_PVH(pai);
- pv_h = pai_to_pvh(pai);
-
- if (pv_h->pmap == PMAP_NULL) {
- /*
- * No mappings yet
- */
- pv_h->va = vaddr;
- pv_h->pmap = pmap;
- pv_h->next = PV_ENTRY_NULL;
- }
- else {
-#if DEBUG
- {
- /*
- * check that this mapping is not already there and
- * that there is no alias for it in the same map
- */
- pv_entry_t e = pv_h;
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap && e->va == vaddr)
- panic("pmap_enter: already in pv_list");
- e = e->next;
- }
- }
-#endif /* DEBUG */
-#if SHARING_FAULTS /* broken, do not enable */
- {
- /*
- * do sharing faults.
- * if we find an entry on this pv list in the same address
- * space, remove it. we know there will not be more
- * than one.
- */
- pv_entry_t e = pv_h;
- pt_entry_t *opte;
-
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap) {
- /*
- * Remove it, drop pv list lock first.
- */
- UNLOCK_PVH(pai);
-
- opte = pmap_pte(pmap, e->va);
- assert(opte != PT_ENTRY_NULL);
- /*
- * Remove the mapping, then invalidate
- * the translation buffer for it.
- */
- pmap_remove_range(pmap, e->va, opte,
- opte + 1);
-
- PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE);
-
- /*
- * We could have removed the head entry, leaving
- * the pv list empty, in which case the new
- * mapping must go in the pv head entry itself.
- * Go back to the top and retry.
- */
- goto RetryPvList;
- }
- e = e->next;
- }
-
- /*
- * check that this mapping is not already there
- */
- e = pv_h;
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap)
- panic("pmap_enter: alias in pv_list");
- e = e->next;
- }
- }
-#endif /* SHARING_FAULTS */
-#if DEBUG_ALIAS
- {
- /*
- * check for aliases within the same address space.
- */
- pv_entry_t e = pv_h;
- vm_offset_t rpc = get_rpc();
-
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap) {
- /*
- * log this entry in the alias ring buffer
- * if it's not there already.
- */
- struct pmap_alias *pma;
- int ii, logit;
-
- logit = TRUE;
- for (ii = 0; ii < pmap_alias_index; ii++) {
- if (pmap_aliasbuf[ii].rpc == rpc) {
- /* found it in the log already */
- logit = FALSE;
- break;
- }
- }
- if (logit) {
- pma = &pmap_aliasbuf[pmap_alias_index];
- pma->pmap = pmap;
- pma->va = vaddr;
- pma->rpc = rpc;
- pma->cookie = PMAP_ALIAS_COOKIE;
- if (++pmap_alias_index >= PMAP_ALIAS_MAX)
- panic("pmap_enter: exhausted alias log");
- }
- }
- e = e->next;
- }
- }
-#endif /* DEBUG_ALIAS */
- /*
- * Add new pv_entry after header.
- */
- if (pv_e == PV_ENTRY_NULL) {
- PV_ALLOC(pv_e);
- if (pv_e == PV_ENTRY_NULL) {
- panic("pmap no pv_e's");
- }
- }
- pv_e->va = vaddr;
- pv_e->pmap = pmap;
- pv_e->next = pv_h->next;
- pv_h->next = pv_e;
- /*
- * Clear pv_e so the entry, now linked into the
- * pv list, is not freed at Done:.
- */
- pv_e = PV_ENTRY_NULL;
- }
- UNLOCK_PVH(pai);
-
- /*
- * Only mappings of managed memory are counted
- * in the pmap's resident_count.
- */
- pmap->stats.resident_count++;
- }
-
- /*
- * Step 3) Enter the mapping.
- */
-
-
- /*
- * Build a template to speed up entering -
- * only the pfn changes.
- */
- template = pa_to_pte(pa) | INTEL_PTE_VALID;
-
- if(flags & VM_MEM_NOT_CACHEABLE) {
- if(!(flags & VM_MEM_GUARDED))
- template |= INTEL_PTE_PTA;
- template |= INTEL_PTE_NCACHE;
- }
-
- if (pmap != kernel_pmap)
- template |= INTEL_PTE_USER;
- if (prot & VM_PROT_WRITE)
- template |= INTEL_PTE_WRITE;
-
- if (set_NX == TRUE)
- template |= INTEL_PTE_NX;
-
- if (wired) {
- template |= INTEL_PTE_WIRED;
- pmap->stats.wired_count++;
- }
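- /*
- * Enter the new mapping.  The TLB flush at Done: is
- * only performed when need_tlbflush was set above,
- * i.e. when an existing translation was replaced.
- */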
- pmap_store_pte(pte, template);
-
-Done:
- if (need_tlbflush == TRUE)
- PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
-
- if (pv_e != PV_ENTRY_NULL) {
- PV_FREE(pv_e);
- }
-
- PMAP_READ_UNLOCK(pmap, spl);
-}
-
-/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
- */
-void
-pmap_change_wiring(
- register pmap_t map,
- vm_map_offset_t vaddr,
- boolean_t wired)
-{
- register pt_entry_t *pte;
- spl_t spl;