#ifdef PMAP_TRACES
extern boolean_t pmap_trace;
-#define PMAP_TRACE(x,a,b,c,d,e) \
- if (pmap_trace) { \
- KERNEL_DEBUG_CONSTANT(x,a,b,c,d,e); \
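+/*
+ * Now variadic: arguments are forwarded to the kdebug macro, and the
+ * tracepoint fires only when pmap_trace is set.
+ */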
+#define PMAP_TRACE(...) \
+ if (pmap_trace) { \
+ KDBG_RELEASE(__VA_ARGS__); \
}
#else
-#define PMAP_TRACE(x,a,b,c,d,e) KERNEL_DEBUG(x,a,b,c,d,e)
+#define PMAP_TRACE(...) KDBG_DEBUG(__VA_ARGS__)
#endif /* PMAP_TRACES */
-#define PMAP_TRACE_CONSTANT(x,a,b,c,d,e) \
- KERNEL_DEBUG_CONSTANT(x,a,b,c,d,e); \
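+/* Unconditional tracepoint, emitted on release kernels as well. */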
+#define PMAP_TRACE_CONSTANT(...) KDBG_RELEASE(__VA_ARGS__)
kern_return_t pmap_expand_pml4(
pmap_t map,
#define IS_INTERNAL_PAGE(x) \
(IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_INTERNAL))
#define IS_REUSABLE_PAGE(x) \
(IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_REUSABLE))
-#define IS_ALTACCT_PAGE(x) \
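+/*
+ * The alt-accounting flag is per-mapping, so the caller passes the
+ * specific pv entry instead of indexing pv_head_table by page.
+ */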
+#define IS_ALTACCT_PAGE(x,pve) \
(IS_MANAGED_PAGE((x)) && \
- (PVE_IS_ALTACCT_PAGE(&pv_head_table[(x)])))
+ (PVE_IS_ALTACCT_PAGE((pve))))
/*
* Physical page attributes. Copy bits from PTE definition.
#define PMAP_EXPAND_OPTIONS_NONE (0x0)
#define PMAP_EXPAND_OPTIONS_NOWAIT (PMAP_OPTIONS_NOWAIT)
#define PMAP_EXPAND_OPTIONS_NOENTER (PMAP_OPTIONS_NOENTER)
-
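+/* Expand option bit outside the PMAP_OPTIONS_* range, used for alias
+ * mappings (see pmap_alias() below). */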
+#define PMAP_EXPAND_OPTIONS_ALIASMAP (0x40000000U)
/*
* Amount of virtual memory mapped by one
* page-directory entry.
#define MAX_PREEMPTION_LATENCY_NS 20000
extern uint64_t max_preemption_latency_tsc;
-/* #define DEBUGINTERRUPTS 1 uncomment to ensure pmap callers have interrupts enabled */
-#ifdef DEBUGINTERRUPTS
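+/* On DEBUG kernels, always check that pmap callers have interrupts enabled. */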
+#if DEBUG
+#define PMAP_INTR_DEBUG (1)
+#endif
+
+#if PMAP_INTR_DEBUG
#define pmap_intr_assert() { \
if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) \
- panic("pmap interrupt assert %s, %d",__FILE__, __LINE__); \
+ panic("pmap interrupt assert %d %s, %d", processor_avail_count, __FILE__, __LINE__); \
}
#else
#define pmap_intr_assert()
return hashidx;
}
-
/*
* unlinks the pv_hashed_entry_t pvh from the singly linked hash chain.
* properly deals with the anchor.
uint32_t bitdex;
pmap_t pvpmap = pv_h->pmap;
vm_map_offset_t pvva = PVE_VA(pv_h);
- vm_map_offset_t pve_flags = PVE_FLAGS(pv_h);
+ vm_map_offset_t pve_flags;
boolean_t ppcd = FALSE;
boolean_t is_ept;
do {
if ((popcnt1((uintptr_t)pv_e->pmap ^ (uintptr_t)pmap) && PVE_VA(pv_e) == vaddr) ||
(pv_e->pmap == pmap && popcnt1(PVE_VA(pv_e) ^ vaddr))) {
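+ /* A single flipped bit in the pmap pointer or VA still matches this
+ * mapping: treat it as a bitflip, repair the entry in place, and retry. */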
+ pve_flags = PVE_FLAGS(pv_e);
pv_e->pmap = pmap;
- if (pv_e == pv_h) {
- pv_h->va_and_flags = vaddr | pve_flags;
- } else {
- pv_e->va_and_flags = vaddr;
- }
+ pv_h->va_and_flags = vaddr | pve_flags;
suppress_reason = PV_BITFLIP;
action = PMAP_ACTION_RETRY;
goto pmap_cpc_exit;
static inline __attribute__((always_inline)) pv_hashed_entry_t
pmap_pv_remove(pmap_t pmap,
vm_map_offset_t vaddr,
- ppnum_t *ppnp,
- pt_entry_t *pte)
+ ppnum_t *ppnp,
+ pt_entry_t *pte,
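+	       /* out: TRUE if the removed mapping was alternate-accounted */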
+ boolean_t *was_altacct)
{
pv_hashed_entry_t pvh_e;
pv_rooted_entry_t pv_h;
uint32_t pv_cnt;
ppnum_t ppn;
+ *was_altacct = FALSE;
pmap_pv_remove_retry:
ppn = *ppnp;
pvh_e = PV_HASHED_ENTRY_NULL;
if (pac == PMAP_ACTION_IGNORE)
goto pmap_pv_remove_exit;
else if (pac == PMAP_ACTION_ASSERT)
- panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p, %p): null pv_list!", pmap, vaddr, ppn, *pte, ppnp, pte);
+ panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p, %p): null pv_list, priors: %d", pmap, vaddr, ppn, *pte, ppnp, pte, pmap_pagetable_corruption_incidents);
else if (pac == PMAP_ACTION_RETRY_RELOCK) {
LOCK_PVH(ppn_to_pai(*ppnp));
pmap_phys_attributes[ppn_to_pai(*ppnp)] |= (PHYS_MODIFIED | PHYS_REFERENCED);
}
if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) {
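+ /* Rooted entry matches: record its alt-accounting state before the
+ * entry is rewritten below. */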
+ *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pv_h);
/*
* Header is the pv_rooted_entry.
* We can't free that. If there is a queued
*/
pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
if (pv_h != (pv_rooted_entry_t) pvh_e) {
- vm_map_offset_t pve_flags;
-
/*
* Entry queued to root, remove this from hash
* and install as new root.
pprevh = pvhash(pvhash_idx);
if (PV_HASHED_ENTRY_NULL == *pprevh) {
panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x): "
- "empty hash, removing rooted",
- pmap, vaddr, ppn);
+ "empty hash, removing rooted, priors: %d",
+ pmap, vaddr, ppn, pmap_pagetable_corruption_incidents);
}
pmap_pvh_unlink(pvh_e);
UNLOCK_PV_HASH(pvhash_idx);
pv_h->pmap = pvh_e->pmap;
- pve_flags = PVE_FLAGS(pv_h);
- pv_h->va_and_flags = PVE_VA(pvh_e) | pve_flags;
+ pv_h->va_and_flags = pvh_e->va_and_flags;
/* dispose of pvh_e */
} else {
/* none queued after rooted */
LOCK_PV_HASH(pvhash_idx);
pprevh = pvhash(pvhash_idx);
if (PV_HASHED_ENTRY_NULL == *pprevh) {
- panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p): empty hash",
- pmap, vaddr, ppn, *pte, pte);
+ panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p): empty hash, priors: %d",
+ pmap, vaddr, ppn, *pte, pte, pmap_pagetable_corruption_incidents);
}
pvh_e = *pprevh;
pmap_pv_hashlist_walks++;
pmap_pagetable_corruption_action_t pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte, ROOT_PRESENT);
if (pac == PMAP_ACTION_ASSERT)
- panic("Possible memory corruption: pmap_pv_remove(%p, 0x%llx, 0x%x, 0x%llx, %p, %p): pv not on hash, head: %p, 0x%llx", pmap, vaddr, ppn, *pte, ppnp, pte, pv_h->pmap, PVE_VA(pv_h));
+ panic("Possible memory corruption: pmap_pv_remove(%p, 0x%llx, 0x%x, 0x%llx, %p, %p): pv not on hash, head: %p, 0x%llx, priors: %d", pmap, vaddr, ppn, *pte, ppnp, pte, pv_h->pmap, PVE_VA(pv_h), pmap_pagetable_corruption_incidents);
else {
UNLOCK_PV_HASH(pvhash_idx);
if (pac == PMAP_ACTION_RETRY_RELOCK) {
}
}
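+ /* Record whether the hashed entry being removed was alt-accounted. */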
+ *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pvh_e);
+
pmap_pv_hashlist_cnts += pv_cnt;
if (pmap_pv_hashlist_max < pv_cnt)
pmap_pv_hashlist_max = pv_cnt;
return pvh_e;
}
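+/*
+ * Query whether the mapping of vaddr in pmap is marked as "alternate
+ * accounting": check the rooted pv entry first, then fall back to a
+ * pv hash lookup. Returns FALSE if the page has no mappings.
+ */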
+static inline __attribute__((always_inline)) boolean_t
+pmap_pv_is_altacct(
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ ppnum_t ppn)
+{
+ pv_hashed_entry_t pvh_e;
+ pv_rooted_entry_t pv_h;
+ int pvhash_idx;
+ boolean_t is_altacct;
+
+ pvh_e = PV_HASHED_ENTRY_NULL;
+ pv_h = pai_to_pvh(ppn_to_pai(ppn));
+
+ if (__improbable(pv_h->pmap == PMAP_NULL)) {
+ return FALSE;
+ }
+
+ if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) {
+ /*
+ * Header is the pv_rooted_entry.
+ */
+ return IS_ALTACCT_PAGE(ppn, pv_h);
+ }
+
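+ /* Not the rooted mapping: walk the pv hash chain for this (pmap, vaddr). */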
+ CHK_NPVHASH();
+ pvhash_idx = pvhashidx(pmap, vaddr);
+ LOCK_PV_HASH(pvhash_idx);
+ pvh_e = *(pvhash(pvhash_idx));
+ if (PV_HASHED_ENTRY_NULL == pvh_e) {
+ panic("Possible memory corruption: pmap_pv_is_altacct(%p,0x%llx,0x%x): empty hash",
+ pmap, vaddr, ppn);
+ }
+ while (PV_HASHED_ENTRY_NULL != pvh_e) {
+ if (pvh_e->pmap == pmap &&
+ PVE_VA(pvh_e) == vaddr &&
+ pvh_e->ppn == ppn)
+ break;
+ pvh_e = pvh_e->nexth;
+ }
+ if (PV_HASHED_ENTRY_NULL == pvh_e) {
+ is_altacct = FALSE;
+ } else {
+ is_altacct = IS_ALTACCT_PAGE(ppn, pvh_e);
+ }
+ UNLOCK_PV_HASH(pvhash_idx);
+
+ return is_altacct;
+}
extern int pt_fake_zone_index;
static inline void
extern boolean_t pmap_initialized;/* Has pmap_init completed? */
#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
-// XXX
-#define HIGH_MEM_BASE ((uint32_t)( -NBPDE) ) /* shared gdt etc seg addr */ /* XXX64 ?? */
-// XXX
-
-
int phys_attribute_test(
ppnum_t phys,
int bits);
} while (!pmap_cmpx_pte(mptep, opte, npte));
}
-#if defined(__x86_64__)
/*
* The single pml4 page per pmap is allocated at pmap create time and exists
* for the duration of the pmap. we allocate this page in kernel vm.
#endif
}
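+/*
+ * Returns the address of the requested PML4 entry in the pmap's user
+ * top-level table (pm_upml4 / pm_ucr3), or NULL if vaddr is
+ * non-canonical.
+ */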
+static inline pml4_entry_t *
+pmap64_user_pml4(pmap_t pmap, vm_map_offset_t vaddr)
+{
+ if (__improbable((vaddr > 0x00007FFFFFFFFFFFULL) &&
+ (vaddr < 0xFFFF800000000000ULL))) {
+ return (NULL);
+ }
+
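+ /* DEBUG kernels read through the physmap alias of the physical
+ * pm_ucr3; otherwise use the kernel mapping pm_upml4. */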
+#if DEBUG
+ return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_ucr3)[(vaddr >> PML4SHIFT) & (NPML4PGS-1)]);
+#else
+ return &pmap->pm_upml4[(vaddr >> PML4SHIFT) & (NPML4PGS-1)];
+#endif
+}
+
/*
* Returns address of requested PDPT entry in the physmap.
*/
}
return (NULL);
}
-#endif
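+/* Establish alias mappings of the range [start, end) at virtual address ava. */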
+extern void pmap_alias(
+ vm_offset_t ava,
+ vm_map_offset_t start,
+ vm_map_offset_t end,
+ vm_prot_t prot,
+ unsigned int options);
+
#if DEBUG
#define DPRINTF(x...) kprintf(x)
#else