+
+
+#ifdef CONFIG_XNUPOST
+#ifdef __arm64__
+static volatile bool pmap_test_took_fault = false;
+
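+/*
+ * Expected-fault handler installed around the test accesses below; records
+ * that a data abort was taken and advances the PC past the faulting
+ * instruction so the test can continue.
+ */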
+static bool
+pmap_test_fault_handler(arm_saved_state_t * state)
+{
+ bool retval = false;
+ uint32_t esr = get_saved_state_esr(state);
+ esr_exception_class_t class = ESR_EC(esr);
+ fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
+
+ if ((class == ESR_EC_DABORT_EL1) &&
+ ((fsc == FSC_PERMISSION_FAULT_L3) || (fsc == FSC_ACCESS_FLAG_FAULT_L3))) {
+ pmap_test_took_fault = true;
+ /* Data abort taken as expected; skip the faulting load/store and resume at the next instruction. */
+ set_saved_state_pc(state, get_saved_state_pc(state) + 4);
+ retval = true;
+ }
+
+ return retval;
+}
+
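+/*
+ * Switch to the given pmap and perform a single read or write at va, with PAN
+ * disabled and the expected-fault handler installed.  Returns true if the
+ * access faulted exactly when should_fault says it should have.
+ */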
+static bool
+pmap_test_access(pmap_t pmap, vm_map_address_t va, bool should_fault, bool is_write)
+{
+ /*
+ * We're switching pmaps without using the normal thread mechanism;
+ * disable interrupts and preemption to avoid any unexpected memory
+ * accesses.
+ */
+ uint64_t old_int_state = pmap_interrupts_disable();
+ pmap_t old_pmap = current_pmap();
+ mp_disable_preemption();
+ pmap_switch(pmap);
+
+ pmap_test_took_fault = false;
+
+ /* Disable PAN so the privileged access to this user-mapped VA does not fault spuriously; pmap should not be the kernel pmap. */
+#if __ARM_PAN_AVAILABLE__
+ __builtin_arm_wsr("pan", 0);
+#endif /* __ARM_PAN_AVAILABLE__ */
+ ml_expect_fault_begin(pmap_test_fault_handler, va);
+
+ if (is_write) {
+ *((volatile uint64_t*)(va)) = 0xdec0de;
+ } else {
+ volatile uint64_t tmp = *((volatile uint64_t*)(va));
+ (void)tmp;
+ }
+
+ /* Save the fault bool, and undo the gross stuff we did. */
+ bool took_fault = pmap_test_took_fault;
+ ml_expect_fault_end();
+#if __ARM_PAN_AVAILABLE__
+ __builtin_arm_wsr("pan", 1);
+#endif /* __ARM_PAN_AVAILABLE__ */
+
+ pmap_switch(old_pmap);
+ mp_enable_preemption();
+ pmap_interrupts_restore(old_int_state);
+ bool retval = (took_fault == should_fault);
+ return retval;
+}
+
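+/*
+ * Read/write wrappers around pmap_test_access that log a test failure when
+ * the observed fault behavior does not match expectations.
+ */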
+static bool
+pmap_test_read(pmap_t pmap, vm_map_address_t va, bool should_fault)
+{
+ bool retval = pmap_test_access(pmap, va, should_fault, false);
+
+ if (!retval) {
+ T_FAIL("%s: %s, "
+ "pmap=%p, va=%p, should_fault=%u",
+ __func__, should_fault ? "did not fault" : "faulted",
+ pmap, (void*)va, (unsigned)should_fault);
+ }
+
+ return retval;
+}
+
+static bool
+pmap_test_write(pmap_t pmap, vm_map_address_t va, bool should_fault)
+{
+ bool retval = pmap_test_access(pmap, va, should_fault, true);
+
+ if (!retval) {
+ T_FAIL("%s: %s, "
+ "pmap=%p, va=%p, should_fault=%u",
+ __func__, should_fault ? "did not fault" : "faulted",
+ pmap, (void*)va, (unsigned)should_fault);
+ }
+
+ return retval;
+}
+
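+/*
+ * Check that exactly the requested subset of the referenced/modified bits is
+ * set on the physical page, and report a test failure otherwise.
+ */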
+static bool
+pmap_test_check_refmod(pmap_paddr_t pa, unsigned int should_be_set)
+{
+ unsigned int should_be_clear = (~should_be_set) & (VM_MEM_REFERENCED | VM_MEM_MODIFIED);
+ unsigned int bits = pmap_get_refmod((ppnum_t)atop(pa));
+
+ bool retval = (((bits & should_be_set) == should_be_set) && ((bits & should_be_clear) == 0));
+
+ if (!retval) {
+ T_FAIL("%s: bits=%u, "
+ "pa=%p, should_be_set=%u",
+ __func__, bits,
+ (void*)pa, should_be_set);
+ }
+
+ return retval;
+}
+
+static __attribute__((noinline)) bool
+pmap_test_read_write(pmap_t pmap, vm_map_address_t va, bool allow_read, bool allow_write)
+{
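+ /*
+ * Use bitwise OR so that both the read and the write checks always run;
+ * mismatches are reported via T_FAIL in the helpers above.
+ */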
+ bool retval = (pmap_test_read(pmap, va, !allow_read) | pmap_test_write(pmap, va, !allow_write));
+ return retval;
+}
+
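+/*
+ * Run the full test sequence against a pmap created with the given flags:
+ * mapping setup and PTE checks, permission tests, the ref/mod state machine,
+ * shared mappings, pmap_protect/pmap_page_protect, and teardown.
+ */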
+static int
+pmap_test_test_config(unsigned int flags)
+{
+ T_LOG("running pmap_test_test_config flags=0x%X", flags);
+ unsigned int map_count = 0;
+ unsigned long page_ratio = 0;
+ pmap_t pmap = pmap_create_options(NULL, 0, flags);
+
+ if (!pmap) {
+ panic("Failed to allocate pmap");
+ }
+
+ __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
+ uintptr_t native_page_size = pt_attr_page_size(native_pt_attr);
+ uintptr_t pmap_page_size = pt_attr_page_size(pt_attr);
+ uintptr_t pmap_twig_size = pt_attr_twig_size(pt_attr);
+
+ if (pmap_page_size <= native_page_size) {
+ page_ratio = native_page_size / pmap_page_size;
+ } else {
+ /*
+ * A page_ratio of less than 1 (i.e. a pmap page larger than the native
+ * page) is not currently supported by the pmap layer; panic.
+ */
+ panic("%s: page_ratio < 1, native_page_size=%lu, pmap_page_size=%lu, "
+     "flags=%u",
+     __func__, native_page_size, pmap_page_size,
+     flags);
+ }
+
+ if (PAGE_RATIO > 1) {
+ /*
+ * The kernel is deliberately pretending to have 16KB pages.
+ * The pmap layer has code that supports this, so pretend the
+ * page size is larger than it is.
+ */
+ pmap_page_size = PAGE_SIZE;
+ native_page_size = PAGE_SIZE;
+ }
+
+ /*
+ * Get two pages from the VM; one to be mapped wired, and one to be
+ * mapped nonwired.
+ */
+ vm_page_t unwired_vm_page = vm_page_grab();
+ vm_page_t wired_vm_page = vm_page_grab();
+
+ if ((unwired_vm_page == VM_PAGE_NULL) || (wired_vm_page == VM_PAGE_NULL)) {
+ panic("Failed to grab VM pages");
+ }
+
+ ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(unwired_vm_page);
+ ppnum_t wired_pn = VM_PAGE_GET_PHYS_PAGE(wired_vm_page);
+
+ pmap_paddr_t pa = ptoa(pn);
+ pmap_paddr_t wired_pa = ptoa(wired_pn);
+
+ /*
+ * We'll start mappings at the second twig TT. This keeps us from only
+ * using the first entry in each TT, which would trivially be address
+ * 0; one of the things we will need to test is retrieving the VA for
+ * a given PTE.
+ */
+ vm_map_address_t va_base = pmap_twig_size;
+ vm_map_address_t wired_va_base = ((2 * pmap_twig_size) - pmap_page_size);
+
+ if (wired_va_base < (va_base + (page_ratio * pmap_page_size))) {
+ /*
+ * Not exactly a functional failure, but this test relies on
+ * there being a spare PTE slot we can use to pin the TT.
+ */
+ panic("Cannot pin translation table");
+ }
+
+ /*
+ * Create the wired mapping; this will prevent the pmap layer from
+ * reclaiming our test TTs, which would interfere with this test
+ * ("interfere" -> "make it panic").
+ */
+ pmap_enter_addr(pmap, wired_va_base, wired_pa, VM_PROT_READ, VM_PROT_READ, 0, true);
+
+ /*
+ * Create read-only mappings of the nonwired page; if the pmap does
+ * not use the same page size as the kernel, create multiple mappings
+ * so that the kernel page is fully mapped.
+ */
+ for (map_count = 0; map_count < page_ratio; map_count++) {
+ pmap_enter_addr(pmap, va_base + (pmap_page_size * map_count), pa + (pmap_page_size * (map_count)), VM_PROT_READ, VM_PROT_READ, 0, false);
+ }
+
+ /* Validate that all the PTEs have the expected PA and VA. */
+ for (map_count = 0; map_count < page_ratio; map_count++) {
+ pt_entry_t * ptep = pmap_pte(pmap, va_base + (pmap_page_size * map_count));
+
+ if (pte_to_pa(*ptep) != (pa + (pmap_page_size * map_count))) {
+ T_FAIL("Unexpected pa=%p, expected %p, map_count=%u",
+ (void*)pte_to_pa(*ptep), (void*)(pa + (pmap_page_size * map_count)), map_count);
+ }
+
+ if (ptep_get_va(ptep) != (va_base + (pmap_page_size * map_count))) {
+ T_FAIL("Unexpected va=%p, expected %p, map_count=%u",
+ (void*)ptep_get_va(ptep), (void*)(va_base + (pmap_page_size * map_count)), map_count);
+ }
+ }
+
+ T_LOG("Validate that reads to our mapping do not fault.");
+ pmap_test_read(pmap, va_base, false);
+
+ T_LOG("Validate that writes to our mapping fault.");
+ pmap_test_write(pmap, va_base, true);
+
+ T_LOG("Make the first mapping writable.");
+ pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
+
+ T_LOG("Validate that writes to our mapping do not fault.");
+ pmap_test_write(pmap, va_base, false);
+
+
+ T_LOG("Make the first mapping XO.");
+ pmap_enter_addr(pmap, va_base, pa, VM_PROT_EXECUTE, VM_PROT_EXECUTE, 0, false);
+
+ T_LOG("Validate that reads to our mapping do not fault.");
+ pmap_test_read(pmap, va_base, false);
+
+ T_LOG("Validate that writes to our mapping fault.");
+ pmap_test_write(pmap, va_base, true);
+
+
+ /*
+ * For page ratios of greater than 1: validate that writes to the other
+ * mappings still fault. Remove the mappings afterwards (we're done
+ * with page ratio testing).
+ */
+ for (map_count = 1; map_count < page_ratio; map_count++) {
+ pmap_test_write(pmap, va_base + (pmap_page_size * map_count), true);
+ pmap_remove(pmap, va_base + (pmap_page_size * map_count), va_base + (pmap_page_size * map_count) + pmap_page_size);
+ }
+
+ T_LOG("Mark the page unreferenced and unmodified.");
+ pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ pmap_test_check_refmod(pa, 0);
+
+ /*
+ * Begin testing the ref/mod state machine. Re-enter the mapping with
+ * different protection/fault_type settings, and confirm that the
+ * ref/mod state matches our expectations at each step.
+ */
+ T_LOG("!ref/!mod: read, no fault. Expect ref/!mod");
+ pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ, VM_PROT_NONE, 0, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
+
+ T_LOG("!ref/!mod: read, read fault. Expect ref/!mod");
+ pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ, VM_PROT_READ, 0, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
+
+ T_LOG("!ref/!mod: rw, read fault. Expect ref/!mod");
+ pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
+
+ T_LOG("ref/!mod: rw, read fault. Expect ref/!mod");
+ pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ, 0, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
+
+ T_LOG("!ref/!mod: rw, rw fault. Expect ref/mod");
+ pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
+
+ /*
+ * Shared memory testing; we'll have two mappings: one read-only,
+ * one read-write.
+ */
+ vm_map_address_t rw_base = va_base;
+ vm_map_address_t ro_base = va_base + pmap_page_size;
+
+ pmap_enter_addr(pmap, rw_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
+ pmap_enter_addr(pmap, ro_base, pa, VM_PROT_READ, VM_PROT_READ, 0, false);
+
+ /*
+ * Test that we take faults as expected for unreferenced/unmodified
+ * pages. Also test the arm_fast_fault interface, to ensure that
+ * mapping permissions change as expected.
+ */
+ T_LOG("!ref/!mod: expect no access");
+ pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ pmap_test_read_write(pmap, ro_base, false, false);
+ pmap_test_read_write(pmap, rw_base, false, false);
+
+ T_LOG("Read fault; expect !ref/!mod -> ref/!mod, read access");
+ arm_fast_fault(pmap, rw_base, VM_PROT_READ, false, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
+ pmap_test_read_write(pmap, ro_base, true, false);
+ pmap_test_read_write(pmap, rw_base, true, false);
+
+ T_LOG("Write fault; expect ref/!mod -> ref/mod, read and write access");
+ arm_fast_fault(pmap, rw_base, VM_PROT_READ | VM_PROT_WRITE, false, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
+ pmap_test_read_write(pmap, ro_base, true, false);
+ pmap_test_read_write(pmap, rw_base, true, true);
+
+ T_LOG("Write fault; expect !ref/!mod -> ref/mod, read and write access");
+ pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ arm_fast_fault(pmap, rw_base, VM_PROT_READ | VM_PROT_WRITE, false, false);
+ pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
+ pmap_test_read_write(pmap, ro_base, true, false);
+ pmap_test_read_write(pmap, rw_base, true, true);
+
+ T_LOG("RW protect both mappings; should not change protections.");
+ pmap_protect(pmap, ro_base, ro_base + pmap_page_size, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_protect(pmap, rw_base, rw_base + pmap_page_size, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_test_read_write(pmap, ro_base, true, false);
+ pmap_test_read_write(pmap, rw_base, true, true);
+
+ T_LOG("Read protect both mappings; RW mapping should become RO.");
+ pmap_protect(pmap, ro_base, ro_base + pmap_page_size, VM_PROT_READ);
+ pmap_protect(pmap, rw_base, rw_base + pmap_page_size, VM_PROT_READ);
+ pmap_test_read_write(pmap, ro_base, true, false);
+ pmap_test_read_write(pmap, rw_base, true, false);
+
+ T_LOG("RW protect the page; mappings should not change protections.");
+ pmap_enter_addr(pmap, rw_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
+ pmap_page_protect(pn, VM_PROT_ALL);
+ pmap_test_read_write(pmap, ro_base, true, false);
+ pmap_test_read_write(pmap, rw_base, true, true);
+
+ T_LOG("Read protect the page; RW mapping should become RO.");
+ pmap_page_protect(pn, VM_PROT_READ);
+ pmap_test_read_write(pmap, ro_base, true, false);
+ pmap_test_read_write(pmap, rw_base, true, false);
+
+ T_LOG("Validate that disconnect removes all known mappings of the page.");
+ pmap_disconnect(pn);
+ if (!pmap_verify_free(pn)) {
+ T_FAIL("Page still has mappings");
+ }
+
+ T_LOG("Remove the wired mapping, so we can tear down the test map.");
+ pmap_remove(pmap, wired_va_base, wired_va_base + pmap_page_size);
+ pmap_destroy(pmap);
+
+ T_LOG("Release the pages back to the VM.");
+ vm_page_lock_queues();
+ vm_page_free(unwired_vm_page);
+ vm_page_free(wired_vm_page);
+ vm_page_unlock_queues();
+
+ T_LOG("Testing successful!");
+ return 0;
+}
+#endif /* __arm64__ */
+
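+/*
+ * Entry point for the pmap POST tests; on arm64, runs the test configuration
+ * for each supported page size.
+ */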
+kern_return_t
+pmap_test(void)
+{
+ T_LOG("Starting pmap_tests");
+#ifdef __arm64__
+ int flags = 0;
+ flags |= PMAP_CREATE_64BIT;
+
+#if __ARM_MIXED_PAGE_SIZE__
+ T_LOG("Testing VM_PAGE_SIZE_4KB");
+ pmap_test_test_config(flags | PMAP_CREATE_FORCE_4K_PAGES);
+ T_LOG("Testing VM_PAGE_SIZE_16KB");
+ pmap_test_test_config(flags);
+#else /* __ARM_MIXED_PAGE_SIZE__ */
+ pmap_test_test_config(flags);
+#endif /* __ARM_MIXED_PAGE_SIZE__ */
+
+#endif /* __arm64__ */
+ T_PASS("completed pmap_test successfully");
+ return KERN_SUCCESS;
+}
+#endif /* CONFIG_XNUPOST */
+
+/*
+ * The following function should never make it to RELEASE code, since
+ * it provides a way to get the PPL to modify text pages.
+ */
+#if DEVELOPMENT || DEBUG
+
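+/* Undefined-instruction encodings used to corrupt text (A32 and Thumb). */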
+#define ARM_UNDEFINED_INSN 0xe7f000f0
+#define ARM_UNDEFINED_INSN_THUMB 0xde00
+
+/**
+ * Forcibly overwrite executable text with an illegal instruction.
+ *
+ * @note Only used for xnu unit testing.
+ *
+ * @param pa The physical address to corrupt.
+ *
+ * @return KERN_SUCCESS on success.
+ */
+kern_return_t
+pmap_test_text_corruption(pmap_paddr_t pa)
+{
+#if XNU_MONITOR
+ return pmap_test_text_corruption_ppl(pa);
+#else /* XNU_MONITOR */
+ return pmap_test_text_corruption_internal(pa);
+#endif /* XNU_MONITOR */
+}
+
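+/**
+ * Implementation backing pmap_test_text_corruption(); runs in the PPL when
+ * XNU_MONITOR is enabled.  Temporarily makes the kernel's physical-aperture
+ * mapping of the page writable if the page holds executable text, writes an
+ * undefined instruction, restores the original permissions, and invalidates
+ * the icache.
+ */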
+MARK_AS_PMAP_TEXT kern_return_t
+pmap_test_text_corruption_internal(pmap_paddr_t pa)
+{
+ vm_offset_t va = phystokv(pa);
+ unsigned int pai = pa_index(pa);
+
+ assert(pa_valid(pa));
+
+ LOCK_PVH(pai);
+
+ pv_entry_t **pv_h = pai_to_pvh(pai);
+ assert(!pvh_test_type(pv_h, PVH_TYPE_NULL));
+#if defined(PVH_FLAG_EXEC)
+ const bool need_ap_twiddle = pvh_get_flags(pv_h) & PVH_FLAG_EXEC;
+
+ if (need_ap_twiddle) {
+ pmap_set_ptov_ap(pai, AP_RWNA, FALSE);
+ }
+#endif /* defined(PVH_FLAG_EXEC) */
+
+ /*
+ * The low bit in an instruction address indicates a Thumb instruction;
+ * clear it before writing the 16-bit undefined encoding.
+ */
+ if (va & 1) {
+ va &= ~(vm_offset_t)1;
+ *(uint16_t *)va = ARM_UNDEFINED_INSN_THUMB;
+ } else {
+ *(uint32_t *)va = ARM_UNDEFINED_INSN;
+ }
+
+#if defined(PVH_FLAG_EXEC)
+ if (need_ap_twiddle) {
+ pmap_set_ptov_ap(pai, AP_RONA, FALSE);
+ }
+#endif /* defined(PVH_FLAG_EXEC) */
+
+ InvalidatePoU_IcacheRegion(va, sizeof(uint32_t));
+
+ UNLOCK_PVH(pai);
+
+ return KERN_SUCCESS;
+}
+
+#endif /* DEVELOPMENT || DEBUG */