+/*
+ * Update cache attributes for all extant managed mappings.
+ * Assumes the PV lock for this page is held, and that the
+ * page is managed.
+ */
+
+void
+pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
+ pv_rooted_entry_t pv_h, pv_e;
+ pv_hashed_entry_t pvh_e, nexth;
+ vm_map_offset_t vaddr;
+ pmap_t pmap;
+ pt_entry_t *ptep;
+
+ assert(IS_MANAGED_PAGE(pn));
+
+ pv_h = pai_to_pvh(pn);
+ /* TODO: translate the PHYS_* bits to PTE bits; while they're
+ * currently identical, they may not remain so.
+ * Potential optimization (here and in page_protect):
+ * parallel shootdowns, check for redundant
+ * attribute modifications.
+ */
+
+ /*
+ * Alter attributes on all mappings
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ pv_e = pv_h;
+ pvh_e = (pv_hashed_entry_t)pv_e;
+
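+ /*
+ * The root pv_rooted_entry_t and the pv_hashed_entry_t
+ * entries chained off it share their leading qlink/va/pmap
+ * layout, so the list can be walked uniformly via qlink and
+ * the casts between the two types below are safe.
+ */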
+ do {
+ pmap = pv_e->pmap;
+ vaddr = pv_e->va;
+ ptep = pmap_pte(pmap, vaddr);
+
+ if (0 == ptep)
+ panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);
+
+ nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);
+ pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);
+ PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+ pvh_e = nexth;
+ } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
+ }
+}
+
+void
+x86_filter_TLB_coherency_interrupts(boolean_t dofilter) {
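+ /* Callers must have interrupts or preemption disabled, per the
+ * assert below, so that the filter state is applied to (and
+ * later cleared on) the same CPU.
+ */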
+ assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+ if (dofilter) {
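+ /* Mark this CPU's CR3 inactive so that remote TLB shootdowns
+ * bypass it while the filter is in effect.
+ */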
+ CPU_CR3_MARK_INACTIVE();
+ } else {
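+ /* Re-enter TLB coherency: mark CR3 active again, order that
+ * store ahead of the pending-invalidation check, and process
+ * any TLB updates posted while this CPU was filtered.
+ */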
+ CPU_CR3_MARK_ACTIVE();
+ __asm__ volatile("mfence");
+ if (current_cpu_datap()->cpu_tlb_invalid)
+ process_pmap_updates();
+ }
+}
+
+