+/*
+ * Update cache attributes for all extant managed mappings.
+ * Assumes PV for this page is locked, and that the page
+ * is managed. We assume that this physical page may be mapped in
+ * both EPT and normal Intel PTEs, so we convert the attributes
+ * to the corresponding format for each pmap.
+ *
+ * We assert that the passed set of attributes is a subset of the
+ * PHYS_CACHEABILITY_MASK.
+ */
+void
+pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
+ pv_rooted_entry_t pv_h, pv_e;
+ pv_hashed_entry_t pvh_e, nexth;
+ vm_map_offset_t vaddr;
+ pmap_t pmap;
+ pt_entry_t *ptep;
+ boolean_t is_ept;
+ unsigned ept_attributes;
+
+ assert(IS_MANAGED_PAGE(pn));
+ /* Caller must pass only cacheability bits; anything else is a bug. */
+ assert(((~PHYS_CACHEABILITY_MASK) & attributes) == 0);
+
+ /* We don't support the PTA bit for EPT PTEs */
+ /* EPT mappings are therefore reduced to a binary choice:
+  * uncacheable if the caller asked for non-cached, write-back otherwise.
+  */
+ if (attributes & INTEL_PTE_NCACHE)
+ ept_attributes = INTEL_EPT_NCACHE;
+ else
+ ept_attributes = INTEL_EPT_WB;
+
+ pv_h = pai_to_pvh(pn);
+ /* TODO: translate the PHYS_* bits to PTE bits, while they're
+ * currently identical, they may not remain so
+ * Potential optimization (here and in page_protect),
+ * parallel shootdowns, check for redundant
+ * attribute modifications.
+ */
+
+ /*
+ * Alter attributes on all mappings
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ /* The PV list is a circular queue: the rooted entry (pv_h) holds
+  * the first mapping, and any additional mappings are hashed
+  * entries chained through qlink. Walk every entry once.
+  */
+ pv_e = pv_h;
+ pvh_e = (pv_hashed_entry_t)pv_e;
+
+ do {
+ pmap = pv_e->pmap;
+ vaddr = pv_e->va;
+ ptep = pmap_pte(pmap, vaddr);
+
+ /* Every mapping recorded on the PV list must have a live PTE;
+  * a missing one indicates pmap/PV-list corruption.
+  */
+ if (0 == ptep)
+ panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);
+
+ is_ept = is_ept_pmap(pmap);
+
+ /* Capture the successor before rewriting the PTE. */
+ nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);
+ if (!is_ept) {
+ pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);
+ } else {
+ pmap_update_pte(ptep, INTEL_EPT_CACHE_MASK, ept_attributes);
+ }
+ /* Shoot down the stale translation for this single page. */
+ PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+ pvh_e = nexth;
+ } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
+ }
+}
+
+/*
+ * Toggle whether this CPU participates in TLB coherency (shootdown)
+ * handling.  Must be invoked with interrupts disabled or preemption
+ * disabled, so the notion of "current CPU" cannot shift mid-call.
+ */
+void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) {
+ assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+ if (!dofilter) {
+ /* Re-enable participation, then drain any invalidation
+  * request that arrived while we were filtered out.
+  */
+ CPU_CR3_MARK_ACTIVE();
+ mfence();
+ if (current_cpu_datap()->cpu_tlb_invalid)
+ process_pmap_updates();
+ } else {
+ CPU_CR3_MARK_INACTIVE();
+ }
+}
+
+