+#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
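+/*
+ * CTRR (Configurable Text Read-only Region) power-on self test.
+ *
+ * On every CPU, verify that the hardware region protection, not the page
+ * tables, is what forbids writes inside the protected region and instruction
+ * fetches outside of it, and that the whole region remains readable.
+ */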
+SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test; /* lives in a read-only segment covered by CTRR */
+uint64_t ctrr_nx_test = 0xd65f03c0;             /* RET; ordinary writable data outside CTRR */
+volatile uint64_t ctrr_exception_esr;           /* ESR recorded by the expected-fault handlers */
+vm_offset_t ctrr_test_va;                       /* VA expected to fault during the current check */
+vm_offset_t ctrr_test_page;                     /* scratch page VA reserved during bootstrap */
+
+kern_return_t
+ctrr_test(void)
+{
+ processor_t p;
+ boolean_t ctrr_disable = FALSE;
+
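+ /* -unsafe_kernel_text (or the CSR override below) disables CTRR lockdown, leaving nothing to test */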
+ PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
+
+#if CONFIG_CSR_FROM_DT
+ if (csr_unsafe_kernel_text) {
+ ctrr_disable = TRUE;
+ }
+#endif /* CONFIG_CSR_FROM_DT */
+
+ if (ctrr_disable) {
+ T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
+ return KERN_SUCCESS;
+ }
+
+ T_LOG("Running CTRR test.");
+
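+ /* CTRR is applied per CPU, so bind the test thread to each processor in turn and repeat the check */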
+ for (p = processor_list; p != NULL; p = p->processor_list) {
+ thread_bind(p);
+ thread_block(THREAD_CONTINUE_NULL);
+ T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
+ ctrr_test_cpu();
+ }
+
+ /* unbind thread from specific cpu */
+ thread_bind(PROCESSOR_NULL);
+ thread_block(THREAD_CONTINUE_NULL);
+
+ return KERN_SUCCESS;
+}
+
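+/*
+ * Expected-fault handler for the read-only test: accept an EL1 data abort
+ * with a level-3 permission fault, record the ESR for the caller to verify,
+ * and step the PC past the faulting store so the test can continue.
+ */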
+static bool
+ctrr_test_ro_fault_handler(arm_saved_state_t * state)
+{
+ bool retval = false;
+ uint32_t esr = get_saved_state_esr(state);
+ esr_exception_class_t class = ESR_EC(esr);
+ fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));
+
+ if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
+ ctrr_exception_esr = esr;
+ add_saved_state_pc(state, 4);
+ retval = true;
+ }
+
+ return retval;
+}
+
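+/*
+ * Expected-fault handler for the no-execute test: accept an EL1 instruction
+ * abort with a level-3 permission fault, record the ESR, and resume at the
+ * caller's return address (LR), since the faulting page cannot be executed.
+ */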
+static bool
+ctrr_test_nx_fault_handler(arm_saved_state_t * state)
+{
+ bool retval = false;
+ uint32_t esr = get_saved_state_esr(state);
+ esr_exception_class_t class = ESR_EC(esr);
+ fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
+
+ if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
+ ctrr_exception_esr = esr;
+ /* return to the instruction immediately after the call to NX page */
+ set_saved_state_pc(state, get_saved_state_lr(state));
+ retval = true;
+ }
+
+ return retval;
+}
+
+/* test CTRR on a cpu, caller to bind thread to desired cpu */
+/* ctrr_test_page was reserved during bootstrap process */
+kern_return_t
+ctrr_test_cpu(void)
+{
+ ppnum_t ro_pn, nx_pn;
+ uint64_t *ctrr_ro_test_ptr;
+ void (*ctrr_nx_test_ptr)(void);
+ kern_return_t kr;
+ uint64_t prot = 0;
+ extern vm_offset_t virtual_space_start;
+
+ /* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */
+
+ vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
+ vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
+ vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
+ vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
+
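+ /* sanity check: the read-only test word must land inside the region, the writable test word outside it */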
+ T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
+ T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");
+
+ ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
+ nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
+ T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non-zero");
+
+ T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
+ (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
+
+ prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
+ T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");
+
+ T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
+ kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
+ T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");
+
+ // assert entire mmu prot path (Hierarchical protection model) is NOT RO
+ // fetch effective block level protections from table/block entries
+ prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
+ T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
+
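+ /*
+  * The CTRR-protected physical page is now aliased at the scratch VA through
+  * a writable PTE (asserted RWNX above).  A write through this alias is
+  * allowed by the page tables, so the data abort below can only come from
+  * the CTRR protection itself.
+  */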
+ ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
+ ctrr_ro_test_ptr = (void *)ctrr_test_va;
+
+ T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
+
+ // should cause data abort
+ ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
+ *ctrr_ro_test_ptr = 1;
+ ml_expect_fault_end();
+
+ // ensure write permission fault at expected level
+ // data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
+
+ T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
+ T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
+ T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
+
+ ctrr_test_va = 0;
+ ctrr_exception_esr = 0;
+ pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
+
+ T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);
+
+ kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
+ VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
+ T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");
+
+ // assert entire mmu prot path (Hierarchical protection model) is NOT XN
+ prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
+ T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");
+
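+ /*
+  * The page holding ctrr_nx_test (a single RET, outside the CTRR region) is
+  * now aliased at the scratch VA through an executable PTE (asserted ROX
+  * above).  CTRR forbids kernel-mode execution outside the protected region,
+  * so the call below should take an instruction abort even though the page
+  * tables allow it.
+  */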
+ ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
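+ /*
+  * With pointer authentication for calls enabled, an indirect call
+  * authenticates its target first, so sign the raw VA with the
+  * function-pointer key before calling through it.
+  */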
+#if __has_feature(ptrauth_calls)
+ ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
+#else
+ ctrr_nx_test_ptr = (void *)ctrr_test_va;
+#endif
+
+ T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
+
+ // should cause instruction (prefetch) abort
+ ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
+ ctrr_nx_test_ptr();
+ ml_expect_fault_end();
+
+ // TODO: ensure execute permission fault at expected level
+ T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
+ T_EXPECT(ISS_IA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
+
+ ctrr_test_va = 0;
+ ctrr_exception_esr = 0;
+
+ pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
+
+ T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
+ for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
+ volatile uint64_t x = *(uint64_t *)addr;
+ (void) x; /* read for side effect only */
+ }
+
+ return KERN_SUCCESS;
+}
+#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */