+/*
+ * Returns TRUE if the mapping of (pmap, vaddr, ppn) is marked for
+ * "alternate accounting" (ALTACCT).  The mapping is looked up first in
+ * the page's rooted pv entry and then, if necessary, in the pv hash
+ * chains.
+ */
+static inline __attribute__((always_inline)) boolean_t
+pmap_pv_is_altacct(
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ ppnum_t ppn)
+{
+ pv_hashed_entry_t pvh_e;
+ pv_rooted_entry_t pv_h;
+ int pvhash_idx;
+ boolean_t is_altacct;
+
+ pvh_e = PV_HASHED_ENTRY_NULL;
+ pv_h = pai_to_pvh(ppn_to_pai(ppn));
+
+ if (__improbable(pv_h->pmap == PMAP_NULL)) {
+ return FALSE;
+ }
+
+ if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) {
+ /*
+ * Header is the pv_rooted_entry.
+ */
+ return IS_ALTACCT_PAGE(ppn, pv_h);
+ }
+
+ CHK_NPVHASH();
+ pvhash_idx = pvhashidx(pmap, vaddr);
+ LOCK_PV_HASH(pvhash_idx);
+ pvh_e = *(pvhash(pvhash_idx));
+ if (PV_HASHED_ENTRY_NULL == pvh_e) {
+ panic("Possible memory corruption: pmap_pv_is_altacct(%p,0x%llx,0x%x): empty hash",
+ pmap, vaddr, ppn);
+ }
+ while (PV_HASHED_ENTRY_NULL != pvh_e) {
+ if (pvh_e->pmap == pmap &&
+ PVE_VA(pvh_e) == vaddr &&
+ pvh_e->ppn == ppn)
+ break;
+ pvh_e = pvh_e->nexth;
+ }
+ if (PV_HASHED_ENTRY_NULL == pvh_e) {
+ is_altacct = FALSE;
+ } else {
+ is_altacct = IS_ALTACCT_PAGE(ppn, pvh_e);
+ }
+ UNLOCK_PV_HASH(pvhash_idx);
+
+ return is_altacct;
+}
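+
+/*
+ * Sketch of a hypothetical caller (not part of this change; the ledger
+ * fields named here are assumptions): an unmap/accounting path can ask
+ * whether the page was charged to the alternate-accounting ledger before
+ * deciding which ledger to debit.
+ *
+ *	if (pmap_pv_is_altacct(pmap, vaddr, ppn)) {
+ *		pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
+ *	} else {
+ *		pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+ *	}
+ */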
+
+extern int pt_fake_zone_index;
+static inline void
+PMAP_ZINFO_PALLOC(pmap_t pmap, vm_size_t bytes)
+{
+ pmap_ledger_credit(pmap, task_ledgers.tkm_private, bytes);
+}
+
+static inline void
+PMAP_ZINFO_PFREE(pmap_t pmap, vm_size_t bytes)
+{
+ pmap_ledger_debit(pmap, task_ledgers.tkm_private, bytes);
+}
+
+static inline void
+PMAP_ZINFO_SALLOC(pmap_t pmap, vm_size_t bytes)
+{
+ pmap_ledger_credit(pmap, task_ledgers.tkm_shared, bytes);
+}
+
+static inline void
+PMAP_ZINFO_SFREE(pmap_t pmap, vm_size_t bytes)
+{
+ pmap_ledger_debit(pmap, task_ledgers.tkm_shared, bytes);
+}
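+
+/*
+ * Illustrative pairing (not part of this change): memory allocated on
+ * behalf of a pmap is credited to tkm_private at allocation time, and the
+ * matching free debits the same amount, so the ledger nets to zero.
+ *
+ *	PMAP_ZINFO_PALLOC(pmap, PAGE_SIZE);
+ *	...
+ *	PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);
+ */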
+
+extern boolean_t pmap_initialized; /* Has pmap_init completed? */
+#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
+
+int phys_attribute_test(
+ ppnum_t phys,
+ int bits);
+void phys_attribute_clear(
+ ppnum_t phys,
+ int bits,
+ unsigned int options,
+ void *arg);
+
+//#define PCID_DEBUG 1
+#if PCID_DEBUG
+#define pmap_pcid_log(fmt, args...) \
+ do { \
+ kprintf(fmt, ##args); \
+ printf(fmt, ##args); \
+ } while(0)
+#else
+#define pmap_pcid_log(fmt, args...)
+#endif
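+
+/*
+ * Illustrative call (argument values made up): the macro can be used
+ * unconditionally, since with PCID_DEBUG unset it expands to nothing.
+ *
+ *	pmap_pcid_log("pcid alloc: cpu %d pmap %p -> pcid %u\n",
+ *	    cpu_number(), pmap, new_pcid);
+ */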
+void pmap_pcid_configure(void);
+
+
+/*
+ * Atomic 64-bit compare and exchange of a page table entry.
+ */
+static inline boolean_t
+pmap_cmpx_pte(pt_entry_t *entryp, pt_entry_t old, pt_entry_t new)
+{
+ boolean_t ret;
+
+ /*
+ * Load the old value into %rax
+ * Load the new value into another register
+ * Compare-exchange-quad at address entryp
+ * If the compare succeeds, the new value is stored, return TRUE.
+ * Otherwise, no swap is made, return FALSE.
+ */
+ asm volatile(
+ " lock; cmpxchgq %2,(%3) \n\t"
+ " setz %%al \n\t"
+ " movzbl %%al,%0"
+ : "=a" (ret)
+ : "a" (old),
+ "r" (new),
+ "r" (entryp)
+ : "memory");
+ return ret;
+}
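+
+/*
+ * For reference only (not part of this change): the sequence above is
+ * equivalent to the GCC/Clang builtin below; the explicit cmpxchgq is
+ * kept so the exact instruction and the "memory" clobber stay under the
+ * pmap code's control.
+ *
+ *	return __sync_bool_compare_and_swap(entryp, old, new) ? TRUE : FALSE;
+ */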
+
+extern uint32_t pmap_update_clear_pte_count;
+
+/*
+ * Atomically clear 'pclear_bits' and set 'pset_bits' in the PTE at
+ * 'mptep', retrying the compare-and-exchange until it succeeds.  A PTE
+ * found already cleared is left alone; the race is only counted.
+ */
+static inline void
+pmap_update_pte(pt_entry_t *mptep, uint64_t pclear_bits, uint64_t pset_bits)
+{
+ pt_entry_t npte, opte;
+ do {
+ opte = *mptep;
+ if (__improbable(opte == 0)) {
+ pmap_update_clear_pte_count++;
+ break;
+ }
+ npte = opte & ~(pclear_bits);
+ npte |= pset_bits;
+ } while (!pmap_cmpx_pte(mptep, opte, npte));
+}
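+
+/*
+ * Typical use (sketch only; INTEL_PTE_WRITE and INTEL_PTE_NX are the
+ * usual PTE bit definitions from pmap.h): revoke write permission and
+ * set the no-execute bit on an existing mapping in one atomic update.
+ *
+ *	pt_entry_t *ptep = pmap_pte(pmap, vaddr);
+ *	if (ptep != NULL)
+ *		pmap_update_pte(ptep, INTEL_PTE_WRITE, INTEL_PTE_NX);
+ */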
+
+/*
+ * The single PML4 page per pmap is allocated at pmap creation time and
+ * exists for the duration of the pmap.  It is allocated in kernel VM.
+ * This returns the address of the requested PML4 entry in that top-level
+ * page.
+ */
+static inline
+pml4_entry_t *
+pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr)
+{
+ /*
+ * Reject non-canonical addresses: the upper 16 bits must be a sign
+ * extension of bit 47, so anything between the top of the user range
+ * and the bottom of the kernel range has no PML4 slot.
+ */
+ if (__improbable((vaddr > 0x00007FFFFFFFFFFFULL) &&
+ (vaddr < 0xFFFF800000000000ULL))) {
+ return (NULL);
+ }
+
+#if DEBUG
+ return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_cr3)[(vaddr >> PML4SHIFT) & (NPML4PG-1)]);
+#else
+ return &pmap->pm_pml4[(vaddr >> PML4SHIFT) & (NPML4PG-1)];
+#endif
+}
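+
+/*
+ * Worked example of the index arithmetic above (constants assumed, not
+ * part of this change): with PML4SHIFT == 39 and NPML4PG == 512, bits
+ * 47:39 of a canonical address select one of the 512 PML4 slots, e.g.
+ *
+ *	(0xFFFFFF8000000000 >> 39) & 511  ->  511
+ *
+ * so the kernel base maps through the last PML4 entry.
+ */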
+
+static inline pml4_entry_t *
+pmap64_user_pml4(pmap_t pmap, vm_map_offset_t vaddr)
+{
+ /* Same canonical-address check as in pmap64_pml4(). */
+ if (__improbable((vaddr > 0x00007FFFFFFFFFFFULL) &&
+ (vaddr < 0xFFFF800000000000ULL))) {
+ return (NULL);
+ }
+
+#if DEBUG
+ return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_ucr3)[(vaddr >> PML4SHIFT) & (NPML4PG-1)]);
+#else
+ return &pmap->pm_upml4[(vaddr >> PML4SHIFT) & (NPML4PG-1)];
+#endif
+}
+
+/*
+ * Returns the address of the requested PDPT entry in the physmap.
+ */
+static inline pdpt_entry_t *
+pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
+{
+ pml4_entry_t newpf;
+ pml4_entry_t *pml4;
+ boolean_t is_ept;
+
+ pml4 = pmap64_pml4(pmap, vaddr);
+ is_ept = is_ept_pmap(pmap);
+
+ if (pml4 && (*pml4 & PTE_VALID_MASK(is_ept))) {
+ newpf = *pml4 & PG_FRAME;
+ return &((pdpt_entry_t *) PHYSMAP_PTOV(newpf))
+ [(vaddr >> PDPTSHIFT) & (NPDPTPG-1)];
+ }
+ return (NULL);
+}
+
+/*
+ * Returns the address of the requested PDE entry in the physmap.
+ */
+static inline pd_entry_t *
+pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
+{
+ pdpt_entry_t newpf;
+ pdpt_entry_t *pdpt;
+ boolean_t is_ept;
+
+ pdpt = pmap64_pdpt(pmap, vaddr);
+ is_ept = is_ept_pmap(pmap);
+
+ if (pdpt && (*pdpt & PTE_VALID_MASK(is_ept))) {
+ newpf = *pdpt & PG_FRAME;
+ return &((pd_entry_t *) PHYSMAP_PTOV(newpf))
+ [(vaddr >> PDSHIFT) & (NPDPG-1)];
+ }
+ return (NULL);
+}
+
+static inline pd_entry_t *
+pmap_pde(pmap_t m, vm_map_offset_t v)
+{
+ pd_entry_t *pde;
+
+ pde = pmap64_pde(m, v);
+
+ return pde;
+}
+
+
+/*
+ * Return the address of the mapped PTE for 'vaddr' in pmap 'pmap'.
+ *
+ * In case the PDE maps a superpage, return the PDE, which, in that case,
+ * is the actual page table entry.
+ */
+static inline pt_entry_t *
+pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
+{
+ pd_entry_t *pde;
+ pd_entry_t newpf;
+ boolean_t is_ept;
+
+ assert(pmap);
+ pde = pmap64_pde(pmap, vaddr);
+
+ is_ept = is_ept_pmap(pmap);
+
+ if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
+ if (*pde & PTE_PS)
+ return pde;
+ newpf = *pde & PG_FRAME;
+ return &((pt_entry_t *)PHYSMAP_PTOV(newpf))
+ [i386_btop(vaddr) & (ppnum_t)(NPTEPG-1)];
+ }
+ return (NULL);
+}
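+
+/*
+ * Illustrative caller (sketch only; PG_FRAME, PTE_PS and PAGE_MASK as in
+ * pmap.h, 'pa' is a hypothetical local): translate a virtual address,
+ * covering both results pmap_pte can produce.  The 2MB-superpage offset
+ * arithmetic is elided.
+ *
+ *	pt_entry_t *ptep = pmap_pte(pmap, vaddr);
+ *	if (ptep && (*ptep & PTE_VALID_MASK(is_ept_pmap(pmap)))) {
+ *		if (*ptep & PTE_PS) {
+ *			// ptep actually points at the superpage PDE
+ *		} else {
+ *			pa = (*ptep & PG_FRAME) | (vaddr & PAGE_MASK);
+ *		}
+ *	}
+ */
+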
+extern void pmap_alias(
+ vm_offset_t ava,
+ vm_map_offset_t start,
+ vm_map_offset_t end,
+ vm_prot_t prot,
+ unsigned int options);
+
+#if DEBUG
+#define DPRINTF(x...) kprintf(x)
+#else
+#define DPRINTF(x...)
+#endif
+