+#include <arm/pmap_public.h>
+#include <kern/ast.h>
+#include <mach/arm/thread_status.h>
+#if defined(__arm64__)
+#include <arm64/tlb.h>
+#else
+#include <arm/tlb.h>
+#endif
+
+
#define ASID_SHIFT (11) /* Shift for 2048 max virtual ASIDs (2048 pmaps) */
#define MAX_ASIDS (1 << ASID_SHIFT) /* Max supported ASIDs (can be virtual) */
#ifndef ARM_ASID_SHIFT
#define ARM_ASID_SHIFT (8) /* Shift for the maximum ARM ASID value (256) */
#endif
#define ARM_MAX_ASIDS (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */
#define NBBY 8 /* Number of bits per byte */

#if __ARM_KERNEL_PROTECT__
/*
 * With __ARM_KERNEL_PROTECT__, half of the hardware ASID space is reserved
 * (presumably for the kernel's EL0/EL1 split mappings — confirm against the
 * TTBR configuration code), so only half the ASIDs, minus the global ASID 0,
 * are available to the pmap.
 */
#define MAX_HW_ASIDS ((ARM_MAX_ASIDS >> 1) - 1)
#else
/* All hardware ASIDs are usable except ASID 0, which is reserved as global. */
#define MAX_HW_ASIDS (ARM_MAX_ASIDS - 1)
#endif

#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (8) /* Shift for the maximum VMID value (256) */
#endif
#define ARM_MAX_VMIDS (1 << ARM_VMID_SHIFT) /* Max VMIDs supported by the hardware */

/* XPRR virtual register map */

/* Number of per-CPU copy windows (see copywindow_strong_sync below). */
#define CPUWINDOWS_MAX 4
+
+#if defined(__arm64__)
+
+#if defined(ARM_LARGE_MEMORY)
+/*
+ * 2 L1 tables (Linear KVA and V=P), plus 2*16 L2 tables map up to (16*64GB) 1TB of DRAM
+ * Upper limit on how many pages can be consumed by bootstrap page tables
+ */
+#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34)
+#else // ARM_LARGE_MEMORY
+#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
+#endif
+
+typedef uint64_t tt_entry_t; /* translation table entry type */
+#define TT_ENTRY_NULL ((tt_entry_t *) 0)
+
+typedef uint64_t pt_entry_t; /* page table entry type */
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#elif defined(__arm__)
+
+typedef uint32_t tt_entry_t; /* translation table entry type */
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+typedef uint32_t pt_entry_t; /* page table entry type */
+#define TT_ENTRY_NULL ((tt_entry_t *) 0)
+
+#else
+#error unknown arch
+#endif
+
/* Forward declaration of the structure that controls page table
 * geometry and TTE/PTE format. */
struct page_table_attr;

/*
 * pv_entry_t - structure to track the active mappings for a given page
 *
 * Each entry links one page-table entry that maps the page; entries for the
 * same physical page are chained through pve_next.
 */
typedef struct pv_entry {
	struct pv_entry *pve_next; /* next alias */
	pt_entry_t      *pve_ptep; /* page table entry */
}
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
 * are 32-bit:
 * Since pt_desc is 64-bit aligned and we cast often from pv_entry to
 * pt_desc.
 */
__attribute__ ((aligned(8))) pv_entry_t;
#else
pv_entry_t;
#endif
+
/* A counted singly-linked free list of pv_entry structures. */
typedef struct {
	pv_entry_t *list;  /* head of the free list */
	uint32_t count;    /* number of entries currently on the list */
} pv_free_list_t;
+
/*
 * Per-CPU pmap state.  One instance exists for each CPU; fields gated on
 * XNU_MONITOR exist only when the PPL (monitor) is built in, and the
 * arm64/arm32 field sets are mutually exclusive.
 */
struct pmap_cpu_data {
#if XNU_MONITOR
	void * ppl_kern_saved_sp;  /* kernel stack pointer saved on PPL entry — NOTE(review): confirm against PPL trampoline */
	void * ppl_stack;          /* per-CPU PPL stack */
	arm_context_t * save_area; /* register save area used when entering/leaving the PPL */
	unsigned int ppl_state;    /* current PPL state for this CPU (state values defined elsewhere) */
#endif
#if defined(__arm64__)
	pmap_t cpu_nested_pmap;    /* nested (shared-region) pmap currently active on this CPU */
	const struct page_table_attr *cpu_nested_pmap_attr; /* page-table geometry of that nested pmap */
	vm_map_address_t cpu_nested_region_addr; /* base address of the nested region */
	vm_map_offset_t cpu_nested_region_size;  /* size of the nested region */
#else
	pmap_t cpu_user_pmap;             /* user pmap currently active on this CPU */
	unsigned int cpu_user_pmap_stamp; /* generation stamp for cpu_user_pmap */
#endif
	unsigned int cpu_number;   /* this CPU's index */
	bool copywindow_strong_sync[CPUWINDOWS_MAX]; /* per-copy-window flag requesting strong TLB sync */
	pv_free_list_t pv_free;    /* per-CPU cache of free pv_entry structures */
	pv_entry_t *pv_free_tail;  /* tail of pv_free, for O(1) append */

	/*
	 * This supports overloading of ARM ASIDs by the pmap. The field needs
	 * to be wide enough to cover all the virtual bits in a virtual ASID.
	 * With 256 physical ASIDs, 8-bit fields let us support up to 65536
	 * Virtual ASIDs, minus all that would map on to 0 (as 0 is a global
	 * ASID).
	 *
	 * If we were to use bitfield shenanigans here, we could save a bit of
	 * memory by only having enough bits to support MAX_ASIDS. However, such
	 * an implementation would be more error prone.
	 */
	uint8_t cpu_sw_asids[MAX_HW_ASIDS]; /* software ASID currently bound to each hardware ASID on this CPU */
};
typedef struct pmap_cpu_data pmap_cpu_data_t;
+