+
+#if MACH_ASSERT
+/*
+ * Debug tunables declared here, defined elsewhere in the VM/ledger code.
+ * In this file only pmap_ledgers_panic_leeway is consulted (see
+ * pmap_set_process()); a nonzero leeway relaxes per-pmap ledger checking.
+ */
+extern int pmap_ledgers_panic;
+extern int pmap_ledgers_panic_leeway;
+
+/*
+ * pmap_check_ledgers:
+ * Debug-only (MACH_ASSERT) sanity check run when a pmap is torn down.
+ * Verifies that the pmap's ledger and its cached VM stats have drained
+ * back to zero.  An imbalance is reported via panic() when both the
+ * global and per-pmap stats assertions are enabled, or via printf()
+ * otherwise.
+ */
+static void
+pmap_check_ledgers(
+    pmap_t pmap)
+{
+    int pid;
+    char *procname;
+
+    if (pmap->pmap_pid == 0) {
+        /*
+         * This pmap was not or is no longer fully associated
+         * with a task (e.g. the old pmap after a fork()/exec() or
+         * spawn()).  Its "ledger" still points at a task that is
+         * now using a different (and active) address space, so
+         * we can't check that all the pmap ledgers are balanced here.
+         *
+         * If the "pid" is set, that means that we went through
+         * pmap_set_process() in task_terminate_internal(), so
+         * this task's ledger should not have been re-used and
+         * all the pmap ledgers should be back to 0.
+         */
+        return;
+    }
+
+    pid = pmap->pmap_pid;
+    procname = pmap->pmap_procname;
+
+    /* Common ledger-vs-stats reconciliation for this address space. */
+    vm_map_pmap_check_ledgers(pmap, pmap->ledger, pid, procname);
+
+    if (pmap->stats.resident_count != 0 ||
+#if 35156815
+        /*
+         * "wired_count" is unfortunately a bit inaccurate, so let's
+         * tolerate some slight deviation to limit the amount of
+         * somewhat-spurious assertion failures.
+         */
+        pmap->stats.wired_count > 10 ||
+#else /* 35156815 */
+        pmap->stats.wired_count != 0 ||
+#endif /* 35156815 */
+        pmap->stats.device != 0 ||
+        pmap->stats.internal != 0 ||
+        pmap->stats.external != 0 ||
+        pmap->stats.reusable != 0 ||
+        pmap->stats.compressed != 0) {
+        if (pmap_stats_assert &&
+            pmap->pmap_stats_assert) {
+            panic("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld",
+                pmap, pid, procname,
+                pmap->stats.resident_count,
+                pmap->stats.wired_count,
+                pmap->stats.device,
+                pmap->stats.internal,
+                pmap->stats.external,
+                pmap->stats.reusable,
+                pmap->stats.compressed);
+        } else {
+            /*
+             * Non-fatal reporting path: include the previously-missing
+             * trailing newline so this record does not run into the
+             * next line in the kernel log.
+             */
+            printf("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld\n",
+                pmap, pid, procname,
+                pmap->stats.resident_count,
+                pmap->stats.wired_count,
+                pmap->stats.device,
+                pmap->stats.internal,
+                pmap->stats.external,
+                pmap->stats.reusable,
+                pmap->stats.compressed);
+        }
+    }
+}
+
+/*
+ * pmap_set_process:
+ * Record the owning process's pid and name in the pmap (debug builds
+ * only).  When a ledger-panic leeway is configured, also relax the
+ * per-pmap sanity checking and ledger panics, since the final check in
+ * pmap_check_ledgers() will still catch large discrepancies.
+ */
+void
+pmap_set_process(
+    pmap_t pmap,
+    int pid,
+    char *procname)
+{
+    if (pmap == NULL) {
+        return;
+    }
+
+    pmap->pmap_pid = pid;
+    strlcpy(pmap->pmap_procname, procname, sizeof(pmap->pmap_procname));
+
+    if (pmap_ledgers_panic_leeway) {
+        /*
+         * XXX FBDP
+         * Some processes somehow trigger some issues that make
+         * the pmap stats and ledgers go off track, causing
+         * some assertion failures and ledger panics.
+         * Turn off the sanity checks if we allow some ledger leeway
+         * because of that. We'll still do a final check in
+         * pmap_check_ledgers() for discrepancies larger than the
+         * allowed leeway after the address space has been fully
+         * cleaned up.
+         */
+        const int leeway_entries[] = {
+            task_ledgers.phys_footprint,
+            task_ledgers.internal,
+            task_ledgers.internal_compressed,
+            task_ledgers.iokit_mapped,
+            task_ledgers.alternate_accounting,
+            task_ledgers.alternate_accounting_compressed,
+        };
+        unsigned int i;
+
+        pmap->pmap_stats_assert = FALSE;
+        for (i = 0; i < sizeof(leeway_entries) / sizeof(leeway_entries[0]); i++) {
+            ledger_disable_panic_on_negative(pmap->ledger, leeway_entries[i]);
+        }
+    }
+}
+#endif /* MACH_ASSERT */
+
+
+#if DEVELOPMENT || DEBUG
+/*
+ * Development/debug tunable: when cleared, pmap_advise_pagezero_range()
+ * unconditionally marks page zero as inaccessible for the pmap.
+ */
+int pmap_pagezero_mitigation = 1;
+#endif
+
+/*
+ * pmap_advise_pagezero_range:
+ * Decide whether page zero is accessible for this pmap, based on the
+ * address space's lowest mapped bound and whether SMAP is enabled.
+ * If the pmap is the one currently active, mirror the result into the
+ * per-CPU flag (under a preemption-disabled window so the CPU cannot
+ * change underneath us).
+ */
+void
+pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound)
+{
+    boolean_t accessible;
+
+#if DEVELOPMENT || DEBUG
+    /* Mitigation disabled via tunable: never allow page zero. */
+    if (!pmap_pagezero_mitigation) {
+        lpmap->pagezero_accessible = FALSE;
+        return;
+    }
+#endif
+    /* Page zero is reachable only without SMAP and when something maps below 0x1000. */
+    accessible = (pmap_smap_enabled == FALSE) && (low_bound < 0x1000);
+    lpmap->pagezero_accessible = accessible;
+    if (current_pmap() == lpmap) {
+        mp_disable_preemption();
+        current_cpu_datap()->cpu_pagezero_mapped = lpmap->pagezero_accessible;
+        mp_enable_preemption();
+    }
+}
+
+/*
+ * pmap_verify_noncacheable:
+ * Assert that the kernel mapping for "vaddr" is not cacheable — either
+ * uncached (PCD) or write-combined (PAT) is acceptable.  Returns the
+ * physical address corresponding to vaddr on success; panics if there
+ * is no translation or the mapping is cacheable.
+ */
+uintptr_t
+pmap_verify_noncacheable(uintptr_t vaddr)
+{
+    pt_entry_t *ptep = pmap_pte(kernel_pmap, vaddr);
+
+    if (ptep == NULL) {
+        panic("pmap_verify_noncacheable: no translation for 0x%lx", vaddr);
+    }
+    /* Non-cacheable or write-combined both OK: test the two bits at once. */
+    if (*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_PAT)) {
+        return pte_to_pa(*ptep) | (vaddr & INTEL_OFFMASK);
+    }
+    panic("pmap_verify_noncacheable: IO read from a cacheable address? address: 0x%lx, PTE: %p, *PTE: 0x%llx", vaddr, ptep, *ptep);
+    /*NOTREACHED*/
+    return 0;
+}
+
+/*
+ * trust_cache_init:
+ * No-op stub — trust caches are unsupported on this architecture, so
+ * there is no state to initialize.
+ */
+void
+trust_cache_init(void)
+{
+ // Unsupported on this architecture.
+}
+
+/*
+ * pmap_load_legacy_trust_cache:
+ * Stub — legacy trust caches are unsupported on this architecture.
+ * Both arguments are ignored; always returns KERN_NOT_SUPPORTED.
+ */
+kern_return_t
+pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache __unused *trust_cache,
+ const vm_size_t __unused trust_cache_len)
+{
+ // Unsupported on this architecture.
+ return KERN_NOT_SUPPORTED;
+}
+
+/*
+ * pmap_load_image4_trust_cache:
+ * Stub — image4 trust caches are unsupported on this architecture.
+ * All arguments are ignored; always returns PMAP_TC_UNKNOWN_FORMAT.
+ */
+pmap_tc_ret_t
+pmap_load_image4_trust_cache(struct pmap_image4_trust_cache __unused *trust_cache,
+ const vm_size_t __unused trust_cache_len,
+ uint8_t const * __unused img4_manifest,
+ const vm_size_t __unused img4_manifest_buffer_len,
+ const vm_size_t __unused img4_manifest_actual_len,
+ bool __unused dry_run)
+{
+ // Unsupported on this architecture.
+ return PMAP_TC_UNKNOWN_FORMAT;
+}
+
+
+/*
+ * pmap_is_trust_cache_loaded:
+ * Stub — trust caches are unsupported on this architecture, so no
+ * cache can ever be loaded.  The uuid is ignored; always returns false.
+ */
+bool
+pmap_is_trust_cache_loaded(const uuid_t __unused uuid)
+{
+ // Unsupported on this architecture.
+ return false;
+}
+
+/*
+ * pmap_lookup_in_loaded_trust_caches:
+ * Stub — trust caches are unsupported on this architecture.  The
+ * cdhash is ignored; always returns false (not found).
+ */
+bool
+pmap_lookup_in_loaded_trust_caches(const uint8_t __unused cdhash[20])
+{
+ // Unsupported on this architecture.
+ return false;
+}
+
+/*
+ * pmap_lookup_in_static_trust_cache:
+ * Stub — static trust caches are unsupported on this architecture.
+ * The cdhash is ignored; always returns 0 (not found / no flags).
+ *
+ * Note: the previous code returned "false" from a function declared to
+ * return uint32_t; use an explicit 0 to match the declared return type
+ * (the sibling lookup stubs returning false are declared bool).
+ */
+uint32_t
+pmap_lookup_in_static_trust_cache(const uint8_t __unused cdhash[20])
+{
+    // Unsupported on this architecture.
+    return 0;
+}
+
+/*
+ * pmap_in_ppl:
+ * Stub — there is no PPL on this architecture, so the caller can never
+ * be executing inside one.  Always returns false.
+ */
+bool
+pmap_in_ppl(void)
+{
+ // Nonexistent on this architecture.
+ return false;
+}
+
+/*
+ * pmap_claim_reserved_ppl_page:
+ * Stub — PPL pages do not exist on this architecture; there is nothing
+ * to claim.  Always returns NULL.
+ */
+void *
+pmap_claim_reserved_ppl_page(void)
+{
+ // Unsupported on this architecture.
+ return NULL;
+}
+
+/*
+ * pmap_free_reserved_ppl_page:
+ * No-op stub — PPL pages do not exist on this architecture; "kva" is
+ * ignored.
+ */
+void
+pmap_free_reserved_ppl_page(void __unused *kva)
+{
+ // Unsupported on this architecture.
+}