+/*
+ * Inform the pmap layer that there is a JIT entry in this map.
+ * NOTE(review): presumably this relaxes execute-protection policy for the
+ * map's JIT region -- confirm against the pmap implementation.
+ */
+extern void pmap_set_jit_entitled(pmap_t pmap);
+
+/*
+ * Tell the pmap layer what range within the nested region the VM intends to
+ * use.
+ *
+ * grand:  the outer pmap into which `subord` is nested
+ * subord: the nested (shared) pmap being trimmed
+ * vstart: start of the used range in `grand`'s address space
+ * nstart: start of the used range in `subord`'s address space
+ * size:   length of the used range, in bytes
+ *
+ * NOTE(review): parameter roles inferred from names/types; confirm against
+ * the pmap_trim() implementation.
+ */
+extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size);
+
+/*
+ * Dump page table contents into the specified buffer. Returns the number of
+ * bytes copied, 0 if insufficient space, (size_t)-1 if unsupported.
+ * This is expected to only be called from kernel debugger context,
+ * so synchronization is not required.
+ *
+ * bufp/buf_end delimit the destination buffer as the half-open byte range
+ * [bufp, buf_end).
+ */
+
+extern size_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end);
+
+/*
+ * Indicates if any special policy is applied to this protection by the pmap
+ * layer.
+ *
+ * prot: the vm_prot_t protection value to query.
+ * Returns true if the pmap layer applies a special policy to `prot`.
+ */
+extern bool pmap_has_prot_policy(vm_prot_t prot);
+
+/*
+ * Causes the pmap to return any available pages that it can return cheaply to
+ * the VM.
+ *
+ * Returns a count -- presumably the number of pages released; confirm
+ * against the implementation.
+ */
+extern uint64_t pmap_release_pages_fast(void);
+
+/*
+ * Disposition flag bits reported by pmap_query_page_info() through its
+ * *disp out-parameter (bitwise OR of the flags below).
+ */
+#define PMAP_QUERY_PAGE_PRESENT 0x01
+#define PMAP_QUERY_PAGE_REUSABLE 0x02
+#define PMAP_QUERY_PAGE_INTERNAL 0x04
+#define PMAP_QUERY_PAGE_ALTACCT 0x08
+#define PMAP_QUERY_PAGE_COMPRESSED 0x10
+#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT 0x20
+/*
+ * Query the disposition of the page mapped at `va` in `pmap`; the result is
+ * a combination of PMAP_QUERY_PAGE_* flags stored in *disp.
+ */
+extern kern_return_t pmap_query_page_info(
+ pmap_t pmap,
+ vm_map_offset_t va,
+ int *disp);
+
+#if CONFIG_PGTRACE
+/*
+ * Page-trace hooks, compiled in only when CONFIG_PGTRACE is enabled.
+ * add/delete take a [start, end) VA range in `pmap`; fault is the handler
+ * invoked with the faulting VA and saved CPU state.
+ * NOTE(review): range inclusivity and return conventions inferred -- confirm
+ * against the pgtrace implementation.
+ */
+int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
+int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
+kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
+#endif
+
+
+#ifdef PLATFORM_BridgeOS
+/*
+ * Legacy (pre-image4) trust cache layout.  The full definition is visible
+ * only on BridgeOS; all other platforms see an opaque forward declaration
+ * below, so the type can only be used through a pointer there.
+ */
+struct pmap_legacy_trust_cache {
+ struct pmap_legacy_trust_cache *next; /* singly-linked list linkage */
+ uuid_t uuid;
+ uint32_t num_hashes; /* number of entries in hashes[] */
+ uint8_t hashes[][CS_CDHASH_LEN]; /* flexible array of CDHashes */
+};
+#else
+struct pmap_legacy_trust_cache;
+#endif
+
+/*
+ * Load a legacy trust cache into the pmap layer.
+ * trust_cache_len is the total size, in bytes, of the buffer at
+ * *trust_cache (header plus the trailing hashes[] entries).
+ */
+extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache,
+ const vm_size_t trust_cache_len);
+
+/*
+ * Image4-wrapped trust cache.  The first two fields are owned by the pmap
+ * layer; the caller fills in data_len and data before handing the structure
+ * to pmap_load_image4_trust_cache().
+ */
+struct pmap_image4_trust_cache {
+ // Filled by pmap layer.
+ struct pmap_image4_trust_cache const *next; // linked list linkage
+ struct trust_cache_module1 const *module; // pointer into module (within data below)
+
+ // Filled by caller.
+ // data is either an image4,
+ // or just the trust cache payload itself if the image4 manifest is external.
+ size_t data_len; // size of data[] in bytes
+ uint8_t const data[]; // flexible array: image4 blob or raw payload
+};
+
+/*
+ * Result codes for pmap_load_image4_trust_cache().  Zero is success; all
+ * failure codes are negative.
+ */
+typedef enum {
+ PMAP_TC_SUCCESS = 0,
+ PMAP_TC_UNKNOWN_FORMAT = -1,
+ PMAP_TC_TOO_SMALL_FOR_HEADER = -2,
+ PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3,
+ PMAP_TC_UNKNOWN_VERSION = -4,
+ PMAP_TC_ALREADY_LOADED = -5,
+ PMAP_TC_TOO_BIG = -6,
+ PMAP_TC_RESOURCE_SHORTAGE = -7,
+ PMAP_TC_MANIFEST_TOO_BIG = -8,
+} pmap_tc_ret_t;
+
+/*
+ * Load an image4 trust cache.
+ *
+ * trust_cache/trust_cache_len: the caller-filled structure and its total
+ *   size in bytes.
+ * img4_manifest: external image4 manifest, used when the manifest is not
+ *   embedded in trust_cache->data; buffer_len is the buffer size and
+ *   actual_len the valid bytes within it.
+ * dry_run: NOTE(review): presumably validates without actually loading --
+ *   confirm against the implementation.
+ */
+extern pmap_tc_ret_t pmap_load_image4_trust_cache(
+ struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len,
+ uint8_t const *img4_manifest,
+ vm_size_t img4_manifest_buffer_len,
+ vm_size_t img4_manifest_actual_len,
+ bool dry_run);
+
+/* Trust-cache query helpers. */
+extern bool pmap_is_trust_cache_loaded(const uuid_t uuid);
+/*
+ * Look up a CDHash in the static trust cache.  NOTE(review): the meaning of
+ * the uint32_t return (flags vs. index vs. boolean) is not evident from this
+ * declaration -- confirm against the implementation before relying on it.
+ */
+extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
+/* Returns true if the CDHash is present in any loaded trust cache. */
+extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);
+
+/* Returns true if currently executing within the PPL. */
+extern bool pmap_in_ppl(void);
+
+/*
+ * Claim a page from the PPL's reserved pool (returns its kernel virtual
+ * address, presumably NULL if none available -- confirm), and return it
+ * to the pool when done.
+ */
+extern void *pmap_claim_reserved_ppl_page(void);
+extern void pmap_free_reserved_ppl_page(void *kva);
+
+/*
+ * Management of pmap-owned ledgers.  alloc_init performs one-time setup
+ * (size is presumably the per-ledger allocation size -- confirm against the
+ * implementation); alloc/free create and destroy individual ledgers.
+ * Parameter names added for consistency with the other prototypes in this
+ * header (ABI unchanged).
+ */
+extern void pmap_ledger_alloc_init(size_t size);
+extern ledger_t pmap_ledger_alloc(void);
+extern void pmap_ledger_free(ledger_t ledger);