+#define INTEL_PTE_VALID 0x00000001ULL
+
+#define INTEL_PTE_WRITE 0x00000002ULL
+#define INTEL_PTE_RW 0x00000002ULL
+
+#define INTEL_PTE_USER 0x00000004ULL
+
+#define INTEL_PTE_WTHRU 0x00000008ULL
+#define INTEL_PTE_NCACHE 0x00000010ULL
+
+#define INTEL_PTE_REF 0x00000020ULL
+#define INTEL_PTE_MOD 0x00000040ULL
+
+#define INTEL_PTE_PS 0x00000080ULL
+#define INTEL_PTE_PAT 0x00000080ULL
+
+#define INTEL_PTE_GLOBAL 0x00000100ULL
+
+/* These markers use software-available bits that are ignored by the
+ * processor's 4-level and EPT pagetable walkers.
+ * N.B.: WIRED was originally bit 10, but that bit conflicts with the
+ * user-mode execute permission for EPT entries when mode-based execute
+ * controls are enabled.
+ */
+#define INTEL_PTE_SWLOCK (0x1ULL << 52)
+#define INTEL_PDPTE_NESTED (0x1ULL << 53)
+#define INTEL_PTE_WIRED (0x1ULL << 54)
+/* TODO: Compressed markers, potential conflict with protection keys? */
+#define INTEL_PTE_COMPRESSED_ALT (1ULL << 61) /* compressed but with "alternate accounting" */
+#define INTEL_PTE_COMPRESSED (1ULL << 62) /* marker, for invalid PTE only -- ignored by hardware for both regular/EPT entries */
+
+#define INTEL_PTE_PFN PG_FRAME
+/* TODO: these should be internal definitions */
+#define INTEL_PTE_NX (1ULL << 63)
+
+#define INTEL_PTE_INVALID 0
+/* This is conservative, but suffices */
+#define INTEL_PTE_RSVD ((1ULL << 10) | (1ULL << 11))
+
+
+#define INTEL_PTE_COMPRESSED_MASK (INTEL_PTE_COMPRESSED | \
+ INTEL_PTE_COMPRESSED_ALT | INTEL_PTE_SWLOCK)
+#define PTE_IS_COMPRESSED(x, ptep, pmap, vaddr) \
+ ((((x) & INTEL_PTE_VALID) == 0) && /* PTE is not valid... */ \
+ ((x) & INTEL_PTE_COMPRESSED) && /* ...has "compressed" marker */ \
+ ((!((x) & ~INTEL_PTE_COMPRESSED_MASK)) || /* ...no other bits */ \
+ pmap_compressed_pte_corruption_repair((x), &(x), (ptep), (pmap), (vaddr))))
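+/*
+ * Illustrative sketch, not part of the pmap API: a hypothetical caller
+ * holding a PTE pointer would test for the compressed marker roughly as
+ * follows (`ptep', `pmap' and `vaddr' are caller-supplied):
+ *
+ *	pt_entry_t pte = *ptep;
+ *	if (PTE_IS_COMPRESSED(pte, ptep, pmap, vaddr)) {
+ *		// not a hardware mapping; the page lives in the compressor
+ *	}
+ */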
+
+#define pa_to_pte(a) ((a) & INTEL_PTE_PFN) /* XXX */
+#define pte_to_pa(p) ((p) & INTEL_PTE_PFN) /* XXX */
+#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
+
+#define pte_kernel_rw(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_RW))
+#define pte_kernel_ro(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID))
+#define pte_user_rw(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER|INTEL_PTE_RW))
+#define pte_user_ro(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER))
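+/*
+ * Illustrative sketch only: given a page-aligned physical address `pa'
+ * (a hypothetical local), a kernel read/write entry would be built as:
+ *
+ *	pt_entry_t pte = pte_kernel_rw(pa);	// valid + writable, supervisor only
+ *
+ * pa_to_pte()/pte_to_pa() simply mask with PG_FRAME, stripping the flag
+ * bits and the low page-offset bits.
+ */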
+
+#define PMAP_INVEPT_SINGLE_CONTEXT 1
+
+
+#define INTEL_EPTP_AD 0x00000040ULL
+
+#define INTEL_EPT_READ 0x00000001ULL
+#define INTEL_EPT_WRITE 0x00000002ULL
+#define INTEL_EPT_EX 0x00000004ULL
+#define INTEL_EPT_IPAT 0x00000040ULL
+#define INTEL_EPT_PS 0x00000080ULL
+#define INTEL_EPT_REF 0x00000100ULL
+#define INTEL_EPT_MOD 0x00000200ULL
+
+#define INTEL_EPT_CACHE_MASK 0x00000038ULL
+#define INTEL_EPT_NCACHE 0x00000000ULL
+#define INTEL_EPT_WC 0x00000008ULL
+#define INTEL_EPT_WTHRU 0x00000020ULL
+#define INTEL_EPT_WP 0x00000028ULL
+#define INTEL_EPT_WB 0x00000030ULL
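+/* The EPT leaf memory type lives in bits 5:3 (UC=0, WC=1, WT=4, WP=5, WB=6),
+ * which is what the encodings above spell out; INTEL_EPT_CACHE_MASK covers
+ * the whole field.
+ */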
+
+/*
+ * Routines to select the correct PTE bits depending on the pmap type
+ * (ordinary pmap vs. EPT pmap).
+ */
+
+static inline pt_entry_t
+pte_remove_ex(pt_entry_t pte, boolean_t is_ept)
+{
+ if (__probable(!is_ept)) {
+ return pte | INTEL_PTE_NX;
+ }
+
+ return pte & (~INTEL_EPT_EX);
+}
+
+static inline pt_entry_t
+pte_set_ex(pt_entry_t pte, boolean_t is_ept)
+{
+ if (__probable(!is_ept)) {
+ return pte & (~INTEL_PTE_NX);
+ }
+
+ return pte | INTEL_EPT_EX;
+}
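+/*
+ * Illustrative sketch only (`pte' and `is_ept' are hypothetical caller
+ * locals): revoking execute permission is written the same way for both
+ * pmap flavors:
+ *
+ *	pte = pte_remove_ex(pte, is_ept);
+ *
+ * For an ordinary pmap this sets INTEL_PTE_NX; for an EPT pmap it clears
+ * INTEL_EPT_EX, since EPT permissions are grant bits rather than a
+ * no-execute bit.
+ */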
+
+static inline pt_entry_t
+physmap_refmod_to_ept(pt_entry_t physmap_pte)
+{
+ pt_entry_t ept_pte = 0;
+
+ if (physmap_pte & INTEL_PTE_MOD) {
+ ept_pte |= INTEL_EPT_MOD;
+ }
+
+ if (physmap_pte & INTEL_PTE_REF) {
+ ept_pte |= INTEL_EPT_REF;
+ }
+
+ return ept_pte;
+}
+
+static inline pt_entry_t
+ept_refmod_to_physmap(pt_entry_t ept_pte)
+{
+ pt_entry_t physmap_pte = 0;
+
+ assert((ept_pte & ~(INTEL_EPT_REF | INTEL_EPT_MOD)) == 0);
+
+ if (ept_pte & INTEL_EPT_REF) {
+ physmap_pte |= INTEL_PTE_REF;
+ }
+
+ if (ept_pte & INTEL_EPT_MOD) {
+ physmap_pte |= INTEL_PTE_MOD;
+ }
+
+ return physmap_pte;
+}
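+/*
+ * Illustrative sketch only: the two helpers translate just the ref/mod
+ * bits between the encodings and are inverses of each other for those
+ * bits, e.g.:
+ *
+ *	pt_entry_t ad = physmap_refmod_to_ept(INTEL_PTE_REF | INTEL_PTE_MOD);
+ *	assert(ept_refmod_to_physmap(ad) == (INTEL_PTE_REF | INTEL_PTE_MOD));
+ *
+ * Note that ept_refmod_to_physmap() asserts its argument contains nothing
+ * beyond INTEL_EPT_REF/INTEL_EPT_MOD.
+ */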
+
+/*
+ * Note: Not all Intel processors support the EPT accessed and dirty bits.
+ * During pmap_init() we check the VMX capabilities of the current hardware
+ * and update this variable accordingly.
+ */
+extern boolean_t pmap_ept_support_ad;
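+/*
+ * Illustrative sketch only (`is_ept' is a hypothetical caller local):
+ * code that relies on hardware-maintained ref/mod state is expected to
+ * consult this flag first:
+ *
+ *	if (is_ept && !pmap_ept_support_ad) {
+ *		// hardware will not set INTEL_EPT_REF/INTEL_EPT_MOD here
+ *	}
+ */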
+
+#define PTE_VALID_MASK(is_ept) ((is_ept) ? (INTEL_EPT_READ | INTEL_EPT_WRITE | INTEL_EPT_EX) : INTEL_PTE_VALID)
+#define PTE_READ(is_ept) ((is_ept) ? INTEL_EPT_READ : INTEL_PTE_VALID)
+#define PTE_WRITE(is_ept) ((is_ept) ? INTEL_EPT_WRITE : INTEL_PTE_WRITE)
+#define PTE_IS_EXECUTABLE(is_ept, pte) ((is_ept) ? (((pte) & INTEL_EPT_EX) != 0) : (((pte) & INTEL_PTE_NX) == 0))
+#define PTE_PS INTEL_PTE_PS
+#define PTE_COMPRESSED INTEL_PTE_COMPRESSED
+#define PTE_COMPRESSED_ALT INTEL_PTE_COMPRESSED_ALT
+#define PTE_NCACHE(is_ept) ((is_ept) ? INTEL_EPT_NCACHE : INTEL_PTE_NCACHE)
+#define PTE_WTHRU(is_ept) ((is_ept) ? INTEL_EPT_WTHRU : INTEL_PTE_WTHRU)
+#define PTE_REF(is_ept) ((is_ept) ? INTEL_EPT_REF : INTEL_PTE_REF)
+#define PTE_MOD(is_ept) ((is_ept) ? INTEL_EPT_MOD : INTEL_PTE_MOD)
+#define PTE_WIRED INTEL_PTE_WIRED
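+/*
+ * Illustrative sketch only (`pa' and `is_ept' are hypothetical caller
+ * locals): the selectors above let a writable template be built without
+ * knowing the pmap flavor:
+ *
+ *	pt_entry_t template = pa_to_pte(pa) | PTE_READ(is_ept) | PTE_WRITE(is_ept);
+ *
+ * For an ordinary pmap this yields a valid, writable entry; for EPT it
+ * yields a read+write entry with no execute permission.
+ */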
+
+
+#define PMAP_DEFAULT_CACHE 0
+#define PMAP_INHIBIT_CACHE 1
+#define PMAP_GUARDED_CACHE 2
+#define PMAP_ACTIVATE_CACHE 4
+#define PMAP_NO_GUARD_CACHE 8
+
+/* Per-pmap ledger operations */
+#define pmap_ledger_debit(p, e, a) ledger_debit((p)->ledger, e, a)
+#define pmap_ledger_credit(p, e, a) ledger_credit((p)->ledger, e, a)
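+/*
+ * Illustrative sketch only: these forward to the ledger_*() routines on
+ * the pmap's ledger; assuming a ledger entry index such as
+ * task_ledgers.phys_footprint and a page-sized amount, a caller would
+ * write:
+ *
+ *	pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+ */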
+
+#ifndef ASSEMBLER