uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters;
unsigned int pmap_cache_attributes(ppnum_t pn) {
- if (pmap_get_cache_attributes(pn) & INTEL_PTE_NCACHE)
+ /* Legacy (non-EPT) query: is_ept == FALSE preserves the prior behavior. */
+ if (pmap_get_cache_attributes(pn, FALSE) & INTEL_PTE_NCACHE)
return (VM_WIMG_IO);
else
return (VM_WIMG_COPYBACK);
}
}
-unsigned pmap_get_cache_attributes(ppnum_t pn) {
+/*
+ * Return the PTE cache-attribute template bits for page pn.
+ * is_ept selects between legacy IA-32e and EPT bit encodings
+ * (the NCACHE/PTA/WB encodings differ between the two formats).
+ */
+unsigned pmap_get_cache_attributes(ppnum_t pn, boolean_t is_ept) {
if (last_managed_page == 0)
return 0;
- if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) {
- return INTEL_PTE_NCACHE;
- }
+ /* Unmanaged pages are always mapped uncached (format-appropriate bit). */
+ if (!IS_MANAGED_PAGE(ppn_to_pai(pn)))
+ return PTE_NCACHE(is_ept);
/*
* The cache attributes are read locklessly for efficiency.
*/
unsigned int attr = pmap_phys_attributes[ppn_to_pai(pn)];
unsigned int template = 0;
-
- if (attr & PHYS_PTA)
+
+ /*
+ * The PTA bit is currently unsupported for EPT PTEs.
+ */
+ if ((attr & PHYS_PTA) && !is_ept)
template |= INTEL_PTE_PTA;
+
+ /*
+ * If the page isn't marked as NCACHE, the default for EPT entries
+ * is WB.
+ */
if (attr & PHYS_NCACHE)
- template |= INTEL_PTE_NCACHE;
+ template |= PTE_NCACHE(is_ept);
+ else if (is_ept)
+ template |= INTEL_EPT_WB;
+
return template;
}
-
+/*
+ * pmap_has_managed_page:
+ * Return TRUE if any page in [first, last] (clamped to
+ * last_managed_page) has PHYS_MANAGED set in pmap_phys_attributes.
+ * Stops scanning at the first managed page found.
+ */
+boolean_t
+pmap_has_managed_page(ppnum_t first, ppnum_t last)
+{
+ ppnum_t pn;
+ boolean_t result;
+
+ assert(last_managed_page);
+ assert(first <= last);
+
+ for (result = FALSE, pn = first;
+ !result
+ && (pn <= last)
+ && (pn <= last_managed_page);
+ pn++)
+ {
+ result = (0 != (pmap_phys_attributes[pn] & PHYS_MANAGED));
+ }
+
+ return (result);
+}
boolean_t
pmap_is_noencrypt(ppnum_t pn)
}
+/*
+ * Lock the physical-page state for pn: managed pages take their
+ * per-page lock (LOCK_PVH); unmanaged pages share phys_backup_lock.
+ * Paired with pmap_unlock_phys_page(), which uses the same selection.
+ */
+void
+pmap_lock_phys_page(ppnum_t pn)
+{
+ int pai;
+
+ pai = ppn_to_pai(pn);
+
+ if (IS_MANAGED_PAGE(pai)) {
+ LOCK_PVH(pai);
+ } else
+ simple_lock(&phys_backup_lock);
+}
+
+
+/*
+ * Release the lock taken by pmap_lock_phys_page(): the managed-page
+ * test must select the same lock (per-page PVH vs. phys_backup_lock)
+ * as the corresponding lock call.
+ */
+void
+pmap_unlock_phys_page(ppnum_t pn)
+{
+ int pai;
+
+ pai = ppn_to_pai(pn);
+
+ if (IS_MANAGED_PAGE(pai)) {
+ UNLOCK_PVH(pai);
+ } else
+ simple_unlock(&phys_backup_lock);
+}
+
+
+
__private_extern__ void
pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1,2)) {
if (pmap_pagetable_corruption_incidents > 0) {
void
pmap_clear_modify(ppnum_t pn)
{
- phys_attribute_clear(pn, PHYS_MODIFIED);
+ /* Updated to the 4-argument form: no options, no caller arg. */
+ phys_attribute_clear(pn, PHYS_MODIFIED, 0, NULL);
}
/*
void
pmap_clear_reference(ppnum_t pn)
{
- phys_attribute_clear(pn, PHYS_REFERENCED);
+ /* Updated to the 4-argument form: no options, no caller arg. */
+ phys_attribute_clear(pn, PHYS_REFERENCED, 0, NULL);
}
void
return (retval);
}
+
+/*
+ * pmap_clear_refmod_options:
+ * Translate the machine-independent VM_MEM_{MODIFIED,REFERENCED}
+ * mask into the x86 PHYS_* attribute bits and clear them, passing
+ * options and arg through to phys_attribute_clear().
+ */
+void
+pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *arg)
+{
+ unsigned int x86Mask;
+
+ x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0)
+ | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));
+
+ phys_attribute_clear(pn, x86Mask, options, arg);
+}
+
/*
* pmap_clear_refmod(phys, mask)
* clears the referenced and modified bits as specified by the mask
x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0)
| ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));
- phys_attribute_clear(pn, x86Mask);
+
+ phys_attribute_clear(pn, x86Mask, 0, NULL);
+}
+
+/*
+ * Compatibility wrapper: disconnect with no options and no caller arg.
+ * The real work is done by pmap_disconnect_options().
+ */
+unsigned int
+pmap_disconnect(ppnum_t pa)
+{
+ return (pmap_disconnect_options(pa, 0, NULL));
}
/*
* Routine:
- * pmap_disconnect
+ * pmap_disconnect_options
*
* Function:
* Disconnect all mappings for this page and return reference and change status
*
*/
unsigned int
-pmap_disconnect(ppnum_t pa)
+pmap_disconnect_options(ppnum_t pa, unsigned int options, void *arg)
{
unsigned refmod, vmrefmod = 0;
- pmap_page_protect(pa, 0); /* disconnect the page */
+ pmap_page_protect_options(pa, 0, options, arg); /* disconnect the page */
pmap_assert(pa != vm_page_fictitious_addr);
- if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa))
+ if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa) || (options & PMAP_OPTIONS_NOREFMOD))
return 0;
refmod = pmap_phys_attributes[pa] & (PHYS_MODIFIED | PHYS_REFERENCED);