ml_set_interrupts_enabled(istate);
}
- kernel_pmap->pm_hold = kernel_pmap->pm_pml4;
+ kernel_pmap->pm_hold = (vm_offset_t)kernel_pmap->pm_pml4;
kprintf("Kernel virtual space from 0x%x to 0x%x.\n",
VADDR(KPTDI,0), virtual_end);
vm_size_t size;
pdpt_entry_t *pdpt;
pml4_entry_t *pml4p;
- vm_page_t m;
int template;
pd_entry_t *pdp;
spl_t s;
p->nx_enabled = 1;
p->pm_64bit = is_64bit;
p->pm_kernel_cr3 = FALSE;
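+ /* new pmaps start out private; pmap_nest() marks nested ones shared */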
+ p->pm_shared = FALSE;
if (!cpu_64bit) {
/* legacy 32 bit setup */
{
int spl;
pdpt_entry_t *user_pdptp;
- uint32_t cr3;
if (!p->pm_kernel_cr3)
return;
static void
pmap_remove_range(
pmap_t pmap,
- vm_map_offset_t vaddr,
+ vm_map_offset_t start_vaddr,
pt_entry_t *spte,
pt_entry_t *epte)
{
register pt_entry_t *cpte;
- int num_removed, num_unwired;
+ int num_removed, num_unwired, num_found;
int pai;
pmap_paddr_t pa;
+ vm_map_offset_t vaddr;
num_removed = 0;
num_unwired = 0;
+ num_found = 0;
- for (cpte = spte; cpte < epte;
- cpte++, vaddr += PAGE_SIZE) {
+ /* invalidate the PTEs first to "freeze" them */
+ for (cpte = spte, vaddr = start_vaddr;
+ cpte < epte;
+ cpte++, vaddr += PAGE_SIZE_64) {
pa = pte_to_pa(*cpte);
if (pa == 0)
continue;
+ num_found++;
if (iswired(*cpte))
num_unwired++;
* Outside range of managed physical memory.
* Just remove the mappings.
*/
- register pt_entry_t *lpte = cpte;
-
- pmap_store_pte(lpte, 0);
+ pmap_store_pte(cpte, 0);
continue;
}
- num_removed++;
+
+ /* invalidate the PTE */
+ pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
+ }
+
+ if (0 == num_found) {
+ /* nothing was changed, we're done */
+ goto update_counts;
+ }
+
+ /* propagate the invalidates to other CPUs */
+
+ PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
+
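+ /*
+ * Once every CPU has dropped its stale translations, no processor
+ * can set R/M bits through an old TLB entry, so the second pass
+ * below can sample and clear them safely.
+ */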
+ for (cpte = spte, vaddr = start_vaddr;
+ cpte < epte;
+ cpte++, vaddr += PAGE_SIZE_64) {
+
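+ /* only INTEL_PTE_VALID was cleared above; the frame bits are intact */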
+ pa = pte_to_pa(*cpte);
+ if (pa == 0)
+ continue;
pai = pa_index(pa);
+
LOCK_PVH(pai);
+ num_removed++;
+
/*
- * Get the modify and reference bits.
+ * Get the modify and reference bits, then
+ * nuke the entry in the page table
*/
- {
- register pt_entry_t *lpte;
-
- lpte = cpte;
- pmap_phys_attributes[pai] |=
- *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
- pmap_store_pte(lpte, 0);
-
- }
+ /* remember reference and change */
+ pmap_phys_attributes[pai] |=
+ (char)(*cpte & (PHYS_MODIFIED|PHYS_REFERENCED));
+ /* completely invalidate the PTE */
+ pmap_store_pte(cpte, 0);
/*
* Remove the mapping from the pvlist for
}
}
+ update_counts:
/*
* Update the counts
*/
pmap->stats.resident_count -= num_removed;
assert(pmap->stats.wired_count >= num_unwired);
pmap->stats.wired_count -= num_unwired;
+ return;
}
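
pmap_update_pte() above is relied on to update live PTEs atomically with
respect to concurrent hardware R/M-bit writes. A minimal sketch of such a
primitive, assuming a pmap_cmpx_pte() compare-and-exchange helper (the
exact XNU definition may differ):

/*
 * Retry until the PTE is swapped from the value just read to the value
 * just computed; if hardware set a reference/modify bit in between, the
 * exchange fails and the macro arguments, which re-read *pte, pick up
 * the new bits on the next attempt.
 */
#define pmap_update_pte(entryp, old, new) \
        while (!pmap_cmpx_pte((entryp), (old), (new)))
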
/*
* Remove the mapping, collecting any modify bits.
*/
- pmap_store_pte(pte, *pte & ~INTEL_PTE_VALID);
+ pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
/*
* Write-protect.
*/
- pmap_store_pte(pte, *pte & ~INTEL_PTE_WRITE);
-
+ pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WRITE));
PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
/*
* Advance prev.
vm_map_offset_t orig_sva;
spl_t spl;
boolean_t set_NX;
+ int num_found = 0;
if (map == PMAP_NULL)
return;
if (*spte & INTEL_PTE_VALID) {
if (prot & VM_PROT_WRITE)
- pmap_store_pte(spte, *spte | INTEL_PTE_WRITE);
+ pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_WRITE));
else
- pmap_store_pte(spte, *spte & ~INTEL_PTE_WRITE);
+ pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_WRITE));
if (set_NX == TRUE)
- pmap_store_pte(spte, *spte | INTEL_PTE_NX);
+ pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_NX));
else
- pmap_store_pte(spte, *spte & ~INTEL_PTE_NX);
+ pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_NX));
+
+ num_found++;
}
spte++;
}
}
sva = lva;
- pde++;
}
- PMAP_UPDATE_TLBS(map, orig_sva, eva);
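+ /* flush remote TLBs only if at least one valid PTE was changed */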
+ if (num_found)
+ PMAP_UPDATE_TLBS(map, orig_sva, eva);
simple_unlock(&map->lock);
SPLX(spl);
pmap_paddr_t pa = (pmap_paddr_t)i386_ptob(pn);
boolean_t need_tlbflush = FALSE;
boolean_t set_NX;
+ char oattr;
XPR(0x80000000, "%x/%x: pmap_enter %x/%qx/%x\n",
current_thread(),
}
}
- if (*pte & INTEL_PTE_MOD)
- template |= INTEL_PTE_MOD;
-
- pmap_store_pte(pte, template);
- pte++;
-
- need_tlbflush = TRUE;
+ /* store the new PTE, preserving the hardware reference/change (RC) bits */
+ pmap_update_pte(pte, *pte, template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
+ need_tlbflush = TRUE;
goto Done;
}
* to overwrite the old one.
*/
+ /* invalidate the PTE */
+ pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
+ /* propagate the invalidate everywhere */
+ PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
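+ /*
+ * The mapping is now invalid on every CPU, so the R/M bits left in
+ * the not-yet-zeroed PTE word are a stable snapshot.
+ */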
+ /* remember reference and change */
+ oattr = (char)(*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
+ /* completely invalidate the PTE */
+ pmap_store_pte(pte, 0);
+
if (valid_page(i386_btop(old_pa))) {
pai = pa_index(old_pa);
pmap->stats.wired_count--;
}
- pmap_phys_attributes[pai] |=
- *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-
- pmap_store_pte(pte, 0);
+ pmap_phys_attributes[pai] |= oattr;
/*
* Remove the mapping from the pvlist for
* this physical page.
if (pv_h->pmap == PMAP_NULL) {
panic("pmap_enter: null pv_list!");
}
+
if (pv_h->va == vaddr && pv_h->pmap == pmap) {
/*
* Header is the pv_entry. Copy the next one
* wiring down mapping
*/
map->stats.wired_count++;
- pmap_store_pte(pte, *pte | INTEL_PTE_WIRED);
+ pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED));
pte++;
}
else if (!wired && iswired(*pte)) {
*/
assert(map->stats.wired_count >= 1);
map->stats.wired_count--;
- pmap_store_pte(pte, *pte & ~INTEL_PTE_WIRED);
+ pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED));
pte++;
}
register vm_map_offset_t va;
va = pv_e->va;
- pte = pmap_pte(pmap, va);
-#if 0
/*
- * Consistency checks.
+ * first make sure any processor actively
+ * using this pmap flushes its TLB state
*/
- assert(*pte & INTEL_PTE_VALID);
- /* assert(pte_to_phys(*pte) == phys); */
-#endif
+
+ PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
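+ /*
+ * With the stale translations gone, any later access must re-walk
+ * the page tables and set the R/M bits afresh, so the clear below
+ * cannot lose a write made through a cached TLB entry.
+ */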
/*
* Clear modify or reference bits.
*/
- pmap_store_pte(pte, *pte & ~bits);
- pte++;
- PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+ pte = pmap_pte(pmap, va);
+ pmap_update_pte(pte, *pte, (*pte & ~bits));
+
}
simple_unlock(&pmap->lock);
}
if ((size >> 28) != 1) panic("pmap_nest: size 0x%llx must be 0x%x", size, NBPDE);
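+ /*
+ * A nested pmap can be reached through any task that maps it, so
+ * mark it shared: TLB shootdowns for it must target every CPU that
+ * might have it active, not only those running this pmap's CR3.
+ */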
+ subord->pm_shared = TRUE;
+
// prepopulate subord pmap pde's if necessary
if (cpu_64bit) {
if (!cpu_datap(cpu)->cpu_running)
continue;
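/*
* Target this CPU if it is running this pmap now, might reach it
* through a shared (nested) mapping, or is using kernel mappings
* while the kernel pmap is being flushed.
*/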
if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
- (cpu_datap(cpu)->cpu_active_cr3 == pmap_cr3) ||
+ (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
+ (pmap->pm_shared) ||
((pmap == kernel_pmap) &&
(!CPU_CR3_IS_ACTIVE(cpu) ||
cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED))) {