#include <vm/pmap.h>
#include <kern/kalloc.h>
+#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>
+#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
+#include <arm64/amcc_rorgn.h>
+#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
+
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
static int lt_stress_local_counters[MAX_CPUS];
lck_ticket_t lt_ticket_lock;
+lck_grp_t lt_ticket_grp;
static void
lt_stress_ticket_lock()
{
int local_counter = 0;
- uint cpuid = current_processor()->cpu_id;
+ uint cpuid = cpu_number();
kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);
- lck_ticket_lock(<_ticket_lock);
+ lck_ticket_lock(<_ticket_lock, <_ticket_grp);
lt_counter++;
local_counter++;
lck_ticket_unlock(<_ticket_lock);
kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);
while (lt_counter < limit) {
- lck_ticket_lock(<_ticket_lock);
+ lck_ticket_lock(<_ticket_lock, <_ticket_grp);
if (lt_counter < limit) {
lt_counter++;
local_counter++;
/* Ticket locks stress test */
T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
extern unsigned int real_ncpus;
- lck_ticket_init(<_ticket_lock);
+ lck_grp_init(<_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
+ lck_ticket_init(<_ticket_lock, <_ticket_grp);
lt_reset();
lt_target_done_threads = real_ncpus;
for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
+ {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
+ {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
#if defined(HAS_APPLE_PAC)
-/*
- *
- * arm64_ropjop_test - basic xnu ROP/JOP test plan
- *
- * - assert ROP/JOP configured and running status match
- * - assert all AppleMode ROP/JOP features enabled
- * - ensure ROP/JOP keys are set and diversified
- * - sign a KVA (the address of this function),assert it was signed (changed)
- * - authenticate the newly signed KVA
- * - assert the authed KVA is the original KVA
- * - corrupt a signed ptr, auth it, ensure auth failed
- * - assert the failed authIB of corrupted pointer is tagged
- *
- */
kern_return_t
arm64_ropjop_test()
/* how is ROP/JOP configured */
boolean_t config_rop_enabled = TRUE;
- boolean_t config_jop_enabled = !(BootArgs->bootFlags & kBootFlagsDisableJOP);
-
-
- /* assert all AppleMode ROP/JOP features enabled */
- uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
-#if __APSTS_SUPPORTED__
- uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
- T_ASSERT(apsts & APSTS_EL1_MKEYVld, NULL);
-#else
- T_ASSERT(apctl & APCTL_EL1_MKEYVld, NULL);
-#endif /* __APSTS_SUPPORTED__ */
- T_ASSERT(apctl & APCTL_EL1_AppleMode, NULL);
- T_ASSERT(apctl & APCTL_EL1_KernKeyEn, NULL);
-
- /* ROP/JOP keys enabled current status */
- bool status_jop_enabled, status_rop_enabled;
-#if __APSTS_SUPPORTED__ /* H13+ */
- // TODO: update unit test to understand ROP/JOP enabled config for H13+
- status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
-#elif __APCFG_SUPPORTED__ /* H12 */
- uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
- status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
-#else /* !__APCFG_SUPPORTED__ H11 */
- uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
- status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
- status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
-#endif /* __APSTS_SUPPORTED__ */
-
- /* assert configured and running status match */
- T_ASSERT(config_rop_enabled == status_rop_enabled, NULL);
- T_ASSERT(config_jop_enabled == status_jop_enabled, NULL);
+ boolean_t config_jop_enabled = TRUE;
if (config_jop_enabled) {
uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);
- /* ensure JOP key is set and diversified */
- T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
}
uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);
- /* ensure ROP key is set and diversified */
- T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
/* sign a KVA (the address of this function) */
return KERN_SUCCESS;
}
+static bool
+arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
+{
+ bool retval = false;
+ uint32_t esr = get_saved_state_esr(state);
+ esr_exception_class_t class = ESR_EC(esr);
+ fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
+ uint32_t cpsr = get_saved_state_cpsr(state);
+ uint64_t far = get_saved_state_far(state);
+
+ if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
+ (cpsr & PSR64_PAN) &&
+ ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
+ ++pan_exception_level;
+ // read the user-accessible value to make sure
+ // pan is enabled and produces a 2nd fault from
+ // the exception handler
+ if (pan_exception_level == 1) {
+ ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
+ pan_fault_value = *(volatile char *)far;
+ ml_expect_fault_end();
+ __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
+ }
+ // this fault address is used for PAN test
+ // disable PAN and rerun
+ mask_saved_state_cpsr(state, 0, PSR64_PAN);
+
+ retval = true;
+ }
+
+ return retval;
+}
+
+static bool
+arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
+{
+ bool retval = false;
+ uint32_t esr = get_saved_state_esr(state);
+ esr_exception_class_t class = ESR_EC(esr);
+ fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
+ uint32_t cpsr = get_saved_state_cpsr(state);
+
+ if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
+ !(cpsr & PSR64_PAN)) {
+ ++pan_exception_level;
+ // On an exception taken from a PAN-disabled context, verify
+ // that PAN is re-enabled for the exception handler and that
+ // accessing the test address produces a PAN fault.
+ ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
+ pan_fault_value = *(volatile char *)pan_test_addr;
+ ml_expect_fault_end();
+ __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
+ add_saved_state_pc(state, 4);
+
+ retval = true;
+ }
+
+ return retval;
+}
+
kern_return_t
arm64_pan_test()
{
+ bool values_match = false;
vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;
T_LOG("Testing PAN.");
pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
_COMM_PAGE_START_ADDRESS;
+ // Context-switch with PAN disabled is prohibited; prevent test logging from
+ // triggering a voluntary context switch.
+ mp_disable_preemption();
+
// Below should trigger a PAN exception as pan_test_addr is accessible
// in user mode
// The exception handler, upon recognizing the fault address is pan_test_addr,
// will disable PAN and rerun this instruction successfully
- T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);
+ ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
+ values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
+ ml_expect_fault_end();
+ T_ASSERT(values_match, NULL);
T_ASSERT(pan_exception_level == 2, NULL);
// Force a permission fault while PAN is disabled to make sure PAN is
// re-enabled during the exception handler.
+ ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
+ ml_expect_fault_end();
T_ASSERT(pan_exception_level == 2, NULL);
__builtin_arm_wsr("pan", 1);
+ mp_enable_preemption();
+
return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */
PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
+#if CONFIG_CSR_FROM_DT
+ if (csr_unsafe_kernel_text) {
+ ctrr_disable = TRUE;
+ }
+#endif /* CONFIG_CSR_FROM_DT */
+
if (ctrr_disable) {
T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
return KERN_SUCCESS;
return KERN_SUCCESS;
}
+static bool
+ctrr_test_ro_fault_handler(arm_saved_state_t * state)
+{
+ bool retval = false;
+ uint32_t esr = get_saved_state_esr(state);
+ esr_exception_class_t class = ESR_EC(esr);
+ fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));
+
+ if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
+ ctrr_exception_esr = esr;
+ add_saved_state_pc(state, 4);
+ retval = true;
+ }
+
+ return retval;
+}
+
+static bool
+ctrr_test_nx_fault_handler(arm_saved_state_t * state)
+{
+ bool retval = false;
+ uint32_t esr = get_saved_state_esr(state);
+ esr_exception_class_t class = ESR_EC(esr);
+ fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
+
+ if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
+ ctrr_exception_esr = esr;
+ /* return to the instruction immediately after the call to NX page */
+ set_saved_state_pc(state, get_saved_state_lr(state));
+ retval = true;
+ }
+
+ return retval;
+}
+
/* test CTRR on a cpu, caller to bind thread to desired cpu */
/* ctrr_test_page was reserved during bootstrap process */
kern_return_t
void (*ctrr_nx_test_ptr)(void);
kern_return_t kr;
uint64_t prot = 0;
- extern uint64_t rorgn_begin, rorgn_end;
extern vm_offset_t virtual_space_start;
- /* rorgn = [rorgn_begin_va, rorgn_end_va) */
+ /* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */
- vm_offset_t rorgn_begin_va = phystokv(rorgn_begin);
- vm_offset_t rorgn_end_va = phystokv(rorgn_end) + PAGE_SIZE;
+ vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
+ vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
// should cause data abort
+ ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
*ctrr_ro_test_ptr = 1;
+ ml_expect_fault_end();
// ensure write permission fault at expected level
// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");
ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
+#if __has_feature(ptrauth_calls)
+ ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
+#else
ctrr_nx_test_ptr = (void *)ctrr_test_va;
+#endif
T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
-#if __has_feature(ptrauth_calls)
- // must sign before calling if we're creating function pointers out of thin air
- ctrr_nx_test_ptr = ptrauth_sign_unauthenticated(ctrr_nx_test_ptr, ptrauth_key_function_pointer, 0);
-#endif
// should cause prefetch abort
+ ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
ctrr_nx_test_ptr();
+ ml_expect_fault_end();
// TODO: ensure execute permission fault at expected level
T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
ctrr_test_va = 0;
ctrr_exception_esr = 0;
+
pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
+
+ T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
+ for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
+ volatile uint64_t x = *(uint64_t *)addr;
+ (void) x; /* read for side effect only */
+ }
+
return KERN_SUCCESS;
}
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */