#ifndef ASSEMBLER
+#include <stdatomic.h>
+#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
+#include <arm/pmap_public.h>
+#include <mach/arm/thread_status.h>
#if __ARM_KERNEL_PROTECT__
/* ... */
#define NBBY 8
struct pmap_cpu_data {
+#if defined(__arm64__)
+ pmap_t cpu_nested_pmap;
+#else
pmap_t cpu_user_pmap;
- unsigned int cpu_number;
unsigned int cpu_user_pmap_stamp;
+#endif
+ unsigned int cpu_number;
+
/*
* This supports overloading of ARM ASIDs by the pmap. The field needs
#include <kern/thread.h>
#include <kern/queue.h>
+
+#include <sys/cdefs.h>
+
/* Base address for low globals. */
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL
#if defined(__arm64__)
+#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
+
typedef uint64_t tt_entry_t; /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)
typedef uint64_t pt_entry_t; /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
-typedef uint64_t pmap_paddr_t; /* physical address (not ppnum_t) */
-
#elif defined(__arm__)
typedef uint32_t tt_entry_t; /* translation table entry type */
typedef uint32_t pt_entry_t; /* page table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)
-typedef uint32_t pmap_paddr_t; /* physical address (not ppnum_t) */
-
#else
#error unknown arch
#endif
#define NPTES (ARM_PGBYTES / sizeof(pt_entry_t))
#endif
+extern void sync_tlb_flush(void);
+extern void flush_mmu_tlb_async(void);
extern void flush_mmu_tlb(void);
+extern void flush_core_tlb_async(void);
extern void flush_core_tlb(void);
#if defined(__arm64__)
+extern void flush_mmu_tlb_allentries_async(uint64_t, uint64_t);
extern void flush_mmu_tlb_allentries(uint64_t, uint64_t);
+extern void flush_mmu_tlb_entry_async(uint64_t);
extern void flush_mmu_tlb_entry(uint64_t);
+extern void flush_mmu_tlb_entries_async(uint64_t, uint64_t);
extern void flush_mmu_tlb_entries(uint64_t, uint64_t);
+extern void flush_mmu_tlb_asid_async(uint64_t);
extern void flush_mmu_tlb_asid(uint64_t);
+extern void flush_core_tlb_asid_async(uint64_t);
extern void flush_core_tlb_asid(uint64_t);
#define tlbi_addr(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
#define tlbi_asid(x) (((uint64_t)(x) << TLBI_ASID_SHIFT) & TLBI_ASID_MASK)
#else
+extern void flush_mmu_tlb_entry_async(uint32_t);
extern void flush_mmu_tlb_entry(uint32_t);
+extern void flush_mmu_tlb_entries_async(uint32_t, uint32_t);
extern void flush_mmu_tlb_entries(uint32_t, uint32_t);
+extern void flush_mmu_tlb_mva_entries_async(uint32_t);
extern void flush_mmu_tlb_mva_entries(uint32_t);
+extern void flush_mmu_tlb_asid_async(uint32_t);
extern void flush_mmu_tlb_asid(uint32_t);
+extern void flush_core_tlb_asid_async(uint32_t);
extern void flush_core_tlb_asid(uint32_t);
#endif
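/*
 * Usage sketch (not part of this header): the new *_async variants issue
 * TLB invalidates without waiting for completion, so several can be
 * batched under the single barrier performed by sync_tlb_flush().  arm64
 * flavor shown; assumes the flush routines take the pre-shifted operand
 * built by tlbi_addr()/tlbi_asid().
 */
static inline void
flush_va_two_asids_sketch(uint64_t va, uint64_t asid_a, uint64_t asid_b)
{
	flush_mmu_tlb_entry_async(tlbi_addr(va) | tlbi_asid(asid_a));
	flush_mmu_tlb_entry_async(tlbi_addr(va) | tlbi_asid(asid_b));
	sync_tlb_flush(); /* one DSB/ISB completes both invalidates */
}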
extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
+extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#else
extern uint32_t get_mmu_control(void);
extern void set_mmu_control(uint32_t);
#define PMAP_GC_INFLIGHT 1
#define PMAP_GC_WAIT 2
+#if DEVELOPMENT || DEBUG
+#define pmap_cs_log(msg, args...) printf("PMAP_CS: " msg "\n", ##args)
+#define pmap_cs_log_h(msg, args...) do { if (pmap_cs_log_hacks) printf("PMAP_CS: " msg "\n", ##args); } while (0)
+
+#define PMAP_CS_EXCEPTION_LIST_HACK 1
+
+#else
+#define pmap_cs_log(msg, args...)
+#define pmap_cs_log_h(msg, args...)
+#endif /* DEVELOPMENT || DEBUG */
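/*
 * Usage sketch (illustrative only): both macros take a printf-style
 * format; with ##args the variadic list may be empty.  pmap_cs_log_hacks
 * is assumed to be a debug global defined by the pmap implementation.
 */
static inline void
pmap_cs_log_sketch(void *addr)
{
	pmap_cs_log("rejecting unsigned mapping at %p", addr);
	pmap_cs_log_h("exception list hack engaged");
}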
+
+
/*
* Convert translation/page table entry to kernel virtual address
*/
pmap_paddr_t ttep; /* translation table physical */
vm_map_address_t min; /* min address in pmap */
vm_map_address_t max; /* max address in pmap */
- unsigned int asid; /* address space id */
- unsigned int vasid; /* Virtual address space id */
- unsigned int stamp; /* creation stamp */
- unsigned int wired; /* wired bits */
- volatile uint32_t ref_count; /* pmap reference count */
- unsigned int cpu_ref; /* number of cpus using pmap */
- unsigned int gc_status; /* gc status */
- ledger_t ledger; /* ledger tracking phys mappings */
+ ledger_t ledger; /* ledger tracking phys mappings */
decl_simple_lock_data(,lock) /* lock on map */
struct pmap_statistics stats; /* map statistics */
queue_chain_t pmaps; /* global list of pmaps */
tt_entry_t *tt_entry_free; /* free translation table entries */
tt_entry_t *prev_tte; /* previous translation table */
- unsigned int tte_index_max; /* max tte index in translation table entries */
- boolean_t nx_enabled; /* no execute */
- boolean_t nested; /* is nested */
- boolean_t is_64bit; /* is 64bit */
struct pmap *nested_pmap; /* nested pmap */
vm_map_address_t nested_region_grand_addr;
vm_map_address_t nested_region_subord_addr;
vm_map_offset_t nested_region_size;
+ vm_map_offset_t nested_region_true_start;
+ vm_map_offset_t nested_region_true_end;
unsigned int *nested_region_asid_bitmap;
- unsigned int nested_region_asid_bitmap_size;
#if (__ARM_VMSA__ <= 7)
decl_simple_lock_data(,tt1_lock) /* lock on tt1 */
+ unsigned int cpu_ref; /* number of cpus using pmap */
#endif
+
+
+ unsigned int asid; /* address space id */
+ unsigned int vasid; /* Virtual address space id */
+ unsigned int stamp; /* creation stamp */
+ _Atomic int32_t ref_count; /* pmap reference count */
+ unsigned int gc_status; /* gc status */
+ unsigned int nested_region_asid_bitmap_size;
+ unsigned int tte_index_max; /* max tte index in translation table entries */
+ uint32_t nested_no_bounds_refcnt; /* number of pmaps that nested this pmap without bounds set */
+
#if MACH_ASSERT
- boolean_t pmap_stats_assert;
int pmap_pid;
char pmap_procname[17];
+ bool pmap_stats_assert;
#endif /* MACH_ASSERT */
#if DEVELOPMENT || DEBUG
- boolean_t footprint_suspended;
- boolean_t footprint_was_suspended;
+ bool footprint_suspended;
+ bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */
+ bool nx_enabled; /* no execute */
+ bool nested; /* is nested */
+ bool is_64bit; /* is 64bit */
+ bool nested_has_no_bounds_ref; /* nested a pmap when the bounds were not set */
+ bool nested_bounds_set; /* The nesting bounds have been set */
};
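/*
 * Sketch of why ref_count became _Atomic (and why <stdatomic.h> is now
 * included above): the reference count can be adjusted without holding
 * the map lock.  Helper name is hypothetical.
 */
static inline void
pmap_reference_sketch(pmap_t pmap)
{
	if (pmap != PMAP_NULL) {
		atomic_fetch_add_explicit(&pmap->ref_count, 1, memory_order_relaxed);
	}
}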
/* typedef struct pmap *pmap_t; */
/*
 * WIMG control
*/
#define VM_MEM_INNER 0x10
+#define VM_MEM_RT 0x10 // intentionally alias VM_MEM_INNER; will be used with mutually exclusive caching policies
#define VM_MEM_EARLY_ACK 0x20
#define VM_WIMG_DEFAULT (VM_MEM_COHERENT)
#define VM_WIMG_IO (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_POSTED (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED | VM_MEM_EARLY_ACK)
#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
-#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
-
+#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
+#define VM_WIMG_RT (VM_WIMG_IO | VM_MEM_RT)
#if VM_DEBUG
extern int pmap_list_resident_pages(
* platform dependent Prototypes
*/
extern void pmap_switch_user_ttb(pmap_t pmap);
+extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_collect(pmap_t pmap);
extern void pmap_gc(void);
#if defined(__arm64__)
-extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va);
+extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va);
#endif
/*
copyout(from, to, cnt)
extern pmap_paddr_t kvtophys(vm_offset_t va);
+extern vm_map_address_t phystokv(pmap_paddr_t pa);
+extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);
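/*
 * Usage sketch for phystokv_range(): unlike phystokv(), the caller passes
 * in how many bytes it wants and the routine is assumed to clamp *max_len
 * to the virtually contiguous run at 'pa', so a physical range can be
 * walked chunk by chunk.  Hypothetical helper.
 */
static inline void
copy_from_phys_sketch(void *dst, pmap_paddr_t pa, vm_size_t len)
{
	while (len > 0) {
		vm_size_t chunk = len;
		const void *src = (const void *)(uintptr_t)phystokv_range(pa, &chunk);
		if (chunk > len) {
			chunk = len; /* guard in case the clamp semantics differ */
		}
		bcopy(src, dst, chunk);
		dst = (char *)dst + chunk;
		pa += chunk;
		len -= chunk;
	}
}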
extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd( vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);
extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);
-extern void pmap_init_pte_static_page(pmap_t, pt_entry_t *, pmap_paddr_t);
extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
+extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
+extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);
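/*
 * Sketch: the per-width entry points let callers that already know the
 * address-space width skip the boolean; pmap_max_offset() can then reduce
 * to a trivial dispatch.  Illustrative, not verbatim source.
 */
static inline vm_map_offset_t
pmap_max_offset_sketch(boolean_t is64, unsigned int option)
{
	return is64 ? pmap_max_64bit_offset(option) : pmap_max_32bit_offset(option);
}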
boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
-#define PMAP_UNHINT_KV_ADDR_INDEX 30
+#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
+#define PMAP_SET_JIT_ENTITLED_INDEX 36
+
+
+#define PMAP_TRIM_INDEX 64
+#define PMAP_LEDGER_ALLOC_INIT_INDEX 65
+#define PMAP_LEDGER_ALLOC_INDEX 66
+#define PMAP_LEDGER_FREE_INDEX 67
-#define MAX_PMAP_INDEX 36
+#define PMAP_COUNT 68
#define PMAP_INVALID_CPU_NUM (~0U)
+struct pmap_cpu_data_array_entry {
+ pmap_cpu_data_t cpu_data;
+} __attribute__((aligned(1 << L2_CLINE)));
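/*
 * Sketch: aligning each entry to an L2 cache line (1 << L2_CLINE bytes)
 * keeps one CPU's pmap data from false-sharing a line with another's.
 * The backing per-CPU array would be declared along these lines (array
 * name and MAX_CPUS bound are assumptions):
 *
 *	extern struct pmap_cpu_data_array_entry pmap_cpu_data_array[MAX_CPUS];
 */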
+
/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);
/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t * pmap_get_cpu_data(void);
+
#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
+#define MARK_AS_PMAP_RODATA
+
+
extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse);
+#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
+#define pmap_simple_lock(l) simple_lock(l)
+#define pmap_simple_unlock(l) simple_unlock(l)
+#define pmap_simple_lock_try(l) simple_lock_try(l)
+#define pmap_lock_bit(l, i) hw_lock_bit(l, i)
+#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i)
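/*
 * Usage sketch: funneling all pmap locking through these wrappers keeps a
 * single substitution point for the underlying primitives.  Illustrative
 * only; 'lock' is the map lock declared in struct pmap above.
 */
static inline void
pmap_locked_update_sketch(pmap_t pmap)
{
	pmap_simple_lock(&pmap->lock);
	/* ... mutate state protected by the map lock ... */
	pmap_simple_unlock(&pmap->lock);
}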
+
#endif /* #ifndef ASSEMBLER */
#if __ARM_KERNEL_PROTECT__