#include <vm/pmap.h>
#include <sys/kdebug.h>
#include <kern/ledger.h>
#include <kern/simple_lock.h>
#include <i386/bit_routines.h>
/*
* pmap locking
//#define PV_DEBUG 1 /* uncomment to enable some PV debugging code */
#ifdef PV_DEBUG
/* Sanity check: the PV hash mask must be initialized before first use. */
#define CHK_NPVHASH() if(0 == npvhashmask) panic("npvhash uninitialized");
#else
#define CHK_NPVHASH(x)
#endif
/*
 * Number of PV hash buckets and the corresponding index mask.
 * NPVHASHBUCKETS must be a power of two so that NPVHASHMASK == 2^N - 1
 * works as an AND-mask in pvhashidx().
 */
#define NPVHASHBUCKETS (4096)
#define NPVHASHMASK ((NPVHASHBUCKETS) - 1) /* MUST BE 2^N - 1 */
#define PV_HASHED_LOW_WATER_MARK_DEFAULT 5000
#define PV_HASHED_KERN_LOW_WATER_MARK_DEFAULT 2000
#define PV_HASHED_ALLOC_CHUNK_INITIAL 2000
#define LOCK_PV_HASH(hash) lock_hash_hash(hash)
#define UNLOCK_PV_HASH(hash) unlock_hash_hash(hash)
/* Index mask for the PV hash table (bucket count - 1); defined in pmap code. */
extern uint32_t npvhashmask;
extern pv_hashed_entry_t *pv_hash_table; /* hash lists */
extern pv_hashed_entry_t pv_hashed_free_list;
extern pv_hashed_entry_t pv_hashed_kern_free_list;
/* Simple locks guarding the PV free lists, the PV hash table, and
 * the physical backup path (declarations only; defined elsewhere). */
decl_simple_lock_data(extern, pv_hashed_free_list_lock)
decl_simple_lock_data(extern, pv_hashed_kern_free_list_lock)
decl_simple_lock_data(extern, pv_hash_table_lock)
decl_simple_lock_data(extern, phys_backup_lock)
extern zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry
* structures */
*/
/*
 * Per-page physical attribute bits, encoded with the same values as the
 * corresponding INTEL_PTE_* bits so they can be or-ed straight into
 * pmap_phys_attributes[].
 */
#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
#define PHYS_MANAGED INTEL_PTE_VALID /* page is managed */
#define PHYS_NOENCRYPT INTEL_PTE_USER /* no need to encrypt this page in the hibernation image */
#define PHYS_NCACHE INTEL_PTE_NCACHE
#define PHYS_PTA INTEL_PTE_PTA
#define PHYS_CACHEABILITY_MASK (INTEL_PTE_PTA | INTEL_PTE_NCACHE)
#define PHYS_INTERNAL INTEL_PTE_WTHRU /* page from internal object */
#define PHYS_REUSABLE INTEL_PTE_WRITE /* page is "reusable" */
-extern const boolean_t pmap_disable_kheap_nx;
-extern const boolean_t pmap_disable_kstack_nx;
+extern boolean_t pmap_disable_kheap_nx;
+extern boolean_t pmap_disable_kstack_nx;
#define PMAP_EXPAND_OPTIONS_NONE (0x0)
#define PMAP_EXPAND_OPTIONS_NOWAIT (PMAP_OPTIONS_NOWAIT)
/*
 * Hash a (pmap, virtual page) pair into a PV hash table bucket index.
 * The pmap pointer is xor-ed with the virtual page number, then masked
 * with npvhashmask — which therefore must be (power-of-two - 1).
 */
static inline uint32_t
pvhashidx(pmap_t pmap, vm_map_offset_t va)
{
	uint32_t hashidx = ((uint32_t)(uintptr_t)pmap ^
	    ((uint32_t)(va >> PAGE_SHIFT) & 0xFFFFFFFF)) &
	    npvhashmask;
	return hashidx;
}
pmap_t pvpmap = pv_h->pmap;
vm_map_offset_t pvva = pv_h->va;
boolean_t ppcd = FALSE;
+ boolean_t is_ept;
/* Ideally, we'd consult the Mach VM here to definitively determine
* the nature of the mapping for this address space and address.
/* As a precautionary measure, mark A+D */
pmap_phys_attributes[ppn_to_pai(ppn)] |= (PHYS_MODIFIED | PHYS_REFERENCED);
+ is_ept = is_ept_pmap(pmap);
/*
* Correct potential single bit errors in either (but not both) element
goto pmap_cpc_exit;
}
- /* Check for malformed/inconsistent entries */
-
- if ((cpte & (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU | INTEL_PTE_PTA)) == (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU)) {
+ /*
+ * Check for malformed/inconsistent entries.
+ * The first check here isn't useful for EPT PTEs because INTEL_EPT_NCACHE == 0
+ */
+ if (!is_ept && ((cpte & (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU | INTEL_PTE_PTA)) == (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU))) {
action = PMAP_ACTION_IGNORE;
suppress_reason = PTE_INVALID_CACHEABILITY;
}
action = PMAP_ACTION_IGNORE;
suppress_reason = PTE_RSVD;
}
- else if ((pmap != kernel_pmap) && ((cpte & INTEL_PTE_USER) == 0)) {
+ else if ((pmap != kernel_pmap) && (!is_ept) && ((cpte & INTEL_PTE_USER) == 0)) {
action = PMAP_ACTION_IGNORE;
suppress_reason = PTE_SUPERVISOR;
}
{
pml4_entry_t newpf;
pml4_entry_t *pml4;
+ boolean_t is_ept;
pml4 = pmap64_pml4(pmap, vaddr);
- if (pml4 && ((*pml4 & INTEL_PTE_VALID))) {
+ is_ept = is_ept_pmap(pmap);
+
+ if (pml4 && (*pml4 & PTE_VALID_MASK(is_ept))) {
newpf = *pml4 & PG_FRAME;
return &((pdpt_entry_t *) PHYSMAP_PTOV(newpf))
[(vaddr >> PDPTSHIFT) & (NPDPTPG-1)];
{
pdpt_entry_t newpf;
pdpt_entry_t *pdpt;
+ boolean_t is_ept;
pdpt = pmap64_pdpt(pmap, vaddr);
+ is_ept = is_ept_pmap(pmap);
- if (pdpt && ((*pdpt & INTEL_PTE_VALID))) {
+ if (pdpt && (*pdpt & PTE_VALID_MASK(is_ept))) {
newpf = *pdpt & PG_FRAME;
return &((pd_entry_t *) PHYSMAP_PTOV(newpf))
[(vaddr >> PDSHIFT) & (NPDPG-1)];
{
pd_entry_t *pde;
pd_entry_t newpf;
+ boolean_t is_ept;
assert(pmap);
pde = pmap64_pde(pmap, vaddr);
- if (pde && ((*pde & INTEL_PTE_VALID))) {
- if (*pde & INTEL_PTE_PS)
+ is_ept = is_ept_pmap(pmap);
+
+ if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
+ if (*pde & PTE_PS)
return pde;
newpf = *pde & PG_FRAME;
return &((pt_entry_t *)PHYSMAP_PTOV(newpf))