+
+#if MACH_ASSERT
+extern int pmap_ledgers_panic;
+extern int pmap_ledgers_panic_leeway;
+
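+/*
+ * Verify, as a pmap is destroyed, that every task ledger entry the pmap
+ * layer maintains has returned to a zero balance, and record drift
+ * statistics for any that has not. With pmap_ledgers_panic set, an
+ * imbalance beyond the configured leeway is fatal.
+ */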
+static void
+pmap_check_ledgers(
+ pmap_t pmap)
+{
+ ledger_amount_t bal;
+ int pid;
+ char *procname;
+ boolean_t do_panic;
+
+ if (pmap->pmap_pid == 0) {
+ /*
+ * This pmap was never, or is no longer, fully associated
+ * with a task (e.g. the old pmap after a fork()/exec() or
+ * spawn()). Its "ledger" still points at a task that is
+ * now using a different (and active) address space, so
+ * we can't check here that all the pmap ledgers are balanced.
+ *
+ * If the "pid" is set, it means we went through
+ * pmap_set_process() in task_terminate_internal(), so
+ * this task's ledger should not have been re-used and
+ * all the pmap ledgers should be back to 0.
+ */
+ return;
+ }
+
+ do_panic = FALSE;
+ pid = pmap->pmap_pid;
+ procname = pmap->pmap_procname;
+
+ pmap_ledgers_drift.num_pmaps_checked++;
+
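+/*
+ * For a single ledger entry: fetch the balance, log any imbalance, decide
+ * whether it justifies a panic (honoring pmap_ledgers_panic_leeway), and
+ * accumulate over/under drift statistics.
+ */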
+#define LEDGER_CHECK_BALANCE(__LEDGER) \
+MACRO_BEGIN \
+ int panic_on_negative = TRUE; \
+ ledger_get_balance(pmap->ledger, \
+ task_ledgers.__LEDGER, \
+ &bal); \
+ ledger_get_panic_on_negative(pmap->ledger, \
+ task_ledgers.__LEDGER, \
+ &panic_on_negative); \
+ if (bal != 0) { \
+ if (panic_on_negative || \
+ (pmap_ledgers_panic && \
+ pmap_ledgers_panic_leeway > 0 && \
+ (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \
+ bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \
+ do_panic = TRUE; \
+ } \
+ printf("LEDGER BALANCE proc %d (%s) " \
+ "\"%s\" = %lld\n", \
+ pid, procname, #__LEDGER, bal); \
+ if (bal > 0) { \
+ pmap_ledgers_drift.__LEDGER##_over++; \
+ pmap_ledgers_drift.__LEDGER##_over_total += bal; \
+ if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \
+ pmap_ledgers_drift.__LEDGER##_over_max = bal; \
+ } \
+ } else if (bal < 0) { \
+ pmap_ledgers_drift.__LEDGER##_under++; \
+ pmap_ledgers_drift.__LEDGER##_under_total += bal; \
+ if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \
+ pmap_ledgers_drift.__LEDGER##_under_max = bal; \
+ } \
+ } \
+ } \
+MACRO_END
+
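+ /* Check each task ledger entry that the pmap layer updates. */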
+ LEDGER_CHECK_BALANCE(phys_footprint);
+ LEDGER_CHECK_BALANCE(internal);
+ LEDGER_CHECK_BALANCE(internal_compressed);
+ LEDGER_CHECK_BALANCE(iokit_mapped);
+ LEDGER_CHECK_BALANCE(alternate_accounting);
+ LEDGER_CHECK_BALANCE(alternate_accounting_compressed);
+ LEDGER_CHECK_BALANCE(page_table);
+ LEDGER_CHECK_BALANCE(purgeable_volatile);
+ LEDGER_CHECK_BALANCE(purgeable_nonvolatile);
+ LEDGER_CHECK_BALANCE(purgeable_volatile_compressed);
+ LEDGER_CHECK_BALANCE(purgeable_nonvolatile_compressed);
+ LEDGER_CHECK_BALANCE(network_volatile);
+ LEDGER_CHECK_BALANCE(network_nonvolatile);
+ LEDGER_CHECK_BALANCE(network_volatile_compressed);
+ LEDGER_CHECK_BALANCE(network_nonvolatile_compressed);
+
+ if (do_panic) {
+ if (pmap_ledgers_panic) {
+ panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
+ pmap, pid, procname);
+ } else {
+ printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
+ pmap, pid, procname);
+ }
+ }
+
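+ /*
+ * The raw pmap stats counters should likewise have drained back
+ * to zero by now (modulo the "wired_count" slop tolerated below).
+ */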
+ if (pmap->stats.resident_count != 0 ||
+#if 35156815
+ /*
+ * "wired_count" is unfortunately a bit inaccurate, so let's
+ * tolerate some slight deviation to limit the number of
+ * somewhat-spurious assertion failures.
+ */
+ pmap->stats.wired_count > 10 ||
+#else /* 35156815 */
+ pmap->stats.wired_count != 0 ||
+#endif /* 35156815 */
+ pmap->stats.device != 0 ||
+ pmap->stats.internal != 0 ||
+ pmap->stats.external != 0 ||
+ pmap->stats.reusable != 0 ||
+ pmap->stats.compressed != 0) {
+ if (pmap_stats_assert &&
+ pmap->pmap_stats_assert) {
+ panic("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld",
+ pmap, pid, procname,
+ pmap->stats.resident_count,
+ pmap->stats.wired_count,
+ pmap->stats.device,
+ pmap->stats.internal,
+ pmap->stats.external,
+ pmap->stats.reusable,
+ pmap->stats.compressed);
+ } else {
+ printf("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld",
+ pmap, pid, procname,
+ pmap->stats.resident_count,
+ pmap->stats.wired_count,
+ pmap->stats.device,
+ pmap->stats.internal,
+ pmap->stats.external,
+ pmap->stats.reusable,
+ pmap->stats.compressed);
+ }
+ }
+}
+
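+/*
+ * Tag a pmap with its owning process' pid and name so that ledger and
+ * stats reports can identify the culprit. When a ledger leeway is
+ * configured, the per-operation sanity checks are relaxed here in favor
+ * of the final pmap_check_ledgers() pass.
+ */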
+void
+pmap_set_process(
+ pmap_t pmap,
+ int pid,
+ char *procname)
+{
+ if (pmap == NULL) {
+ return;
+ }
+
+ pmap->pmap_pid = pid;
+ strlcpy(pmap->pmap_procname, procname, sizeof(pmap->pmap_procname));
+ if (pmap_ledgers_panic_leeway) {
+ /*
+ * XXX FBDP
+ * Some processes trigger issues that make the pmap stats and
+ * ledgers drift off track, causing assertion failures and
+ * ledger panics.
+ * Turn off those sanity checks when we allow some ledger
+ * leeway because of that. We'll still do a final check in
+ * pmap_check_ledgers() for discrepancies larger than the
+ * allowed leeway after the address space has been fully
+ * cleaned up.
+ */
+ pmap->pmap_stats_assert = FALSE;
+ ledger_disable_panic_on_negative(pmap->ledger,
+ task_ledgers.phys_footprint);
+ ledger_disable_panic_on_negative(pmap->ledger,
+ task_ledgers.internal);
+ ledger_disable_panic_on_negative(pmap->ledger,
+ task_ledgers.internal_compressed);
+ ledger_disable_panic_on_negative(pmap->ledger,
+ task_ledgers.iokit_mapped);
+ ledger_disable_panic_on_negative(pmap->ledger,
+ task_ledgers.alternate_accounting);
+ ledger_disable_panic_on_negative(pmap->ledger,
+ task_ledgers.alternate_accounting_compressed);
+ }
+}
+#endif /* MACH_ASSERT */
+
+
+#if DEVELOPMENT || DEBUG
+int pmap_pagezero_mitigation = 1;
+#endif
+
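+/*
+ * Track whether this pmap can access page zero: only when SMAP is not
+ * enabled and the address space's lowest valid address falls below 4K.
+ * If this is the active pmap, refresh the per-CPU flag as well.
+ */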
+void
+pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound)
+{
+#if DEVELOPMENT || DEBUG
+ if (pmap_pagezero_mitigation == 0) {
+ lpmap->pagezero_accessible = FALSE;
+ return;
+ }
+#endif
+ lpmap->pagezero_accessible = ((pmap_smap_enabled == FALSE) && (low_bound < 0x1000));
+ if (lpmap == current_pmap()) {
+ mp_disable_preemption();
+ current_cpu_datap()->cpu_pagezero_mapped = lpmap->pagezero_accessible;
+ mp_enable_preemption();
+ }
+}
+
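+/*
+ * Sanity-check that a kernel virtual address is mapped non-cacheable
+ * (or write-combined) before it is used for an IO read, returning the
+ * corresponding physical address; panic on a cacheable mapping.
+ */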
+uintptr_t
+pmap_verify_noncacheable(uintptr_t vaddr)
+{
+ pt_entry_t *ptep = pmap_pte(kernel_pmap, vaddr);
+ if (ptep == NULL) {
+ panic("pmap_verify_noncacheable: no translation for 0x%lx", vaddr);
+ }
+ /* Non-cacheable OK */
+ if (*ptep & (INTEL_PTE_NCACHE)) {
+ return pte_to_pa(*ptep) | (vaddr & INTEL_OFFMASK);
+ }
+ /* Write-combined OK */
+ if (*ptep & (INTEL_PTE_PAT)) {
+ return pte_to_pa(*ptep) | (vaddr & INTEL_OFFMASK);
+ }
+ panic("pmap_verify_noncacheable: IO read from a cacheable address? address: 0x%lx, PTE: %p, *PTE: 0x%llx", vaddr, ptep, *ptep);
+ /*NOTREACHED*/
+ return 0;
+}
+
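+/*
+ * Trust caches are not handled at the pmap layer on this architecture;
+ * these stubs keep the cross-architecture pmap interface uniform.
+ */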
+void
+trust_cache_init(void)
+{
+ // Unsupported on this architecture.
+}
+
+kern_return_t
+pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache __unused *trust_cache,
+ const vm_size_t __unused trust_cache_len)
+{
+ // Unsupported on this architecture.
+ return KERN_NOT_SUPPORTED;
+}
+
+pmap_tc_ret_t
+pmap_load_image4_trust_cache(struct pmap_image4_trust_cache __unused *trust_cache,
+ const vm_size_t __unused trust_cache_len,
+ uint8_t const * __unused img4_manifest,
+ const vm_size_t __unused img4_manifest_buffer_len,
+ const vm_size_t __unused img4_manifest_actual_len,
+ bool __unused dry_run)
+{
+ // Unsupported on this architecture.
+ return PMAP_TC_UNKNOWN_FORMAT;
+}
+