-/* Report whether use of the WFE instruction is allowed on this platform. */
-boolean_t
-arm64_wfe_allowed(void)
-{
- return TRUE;
-}
-
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
-
-uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0;
-uint64_t rorgn_end __attribute__((section("__DATA, __const"))) = 0;
-vm_offset_t amcc_base;
-
-static void assert_unlocked(void);
-static void assert_amcc_cache_disabled(void);
-static void lock_amcc(void);
-static void lock_mmu(uint64_t begin, uint64_t end);
-
-void
-rorgn_stash_range(void)
-{
-#if DEVELOPMENT || DEBUG
- boolean_t rorgn_disable = FALSE;
-
- PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
-
- if (rorgn_disable) {
- /* Take the early out if the boot-arg is present; don't query any machine
- * registers, to avoid a dependency on the amcc DT entry.
- */
- return;
- }
-#endif
-
- /* Get the AMCC values and stash them into rorgn_begin and rorgn_end.
- * gPhysBase is the base of DRAM managed by xnu. We need DRAM_BASE because
- * the AMCC RO region begin/end registers are in units of 16KB page
- * numbers from DRAM_BASE, so truncate gPhysBase to a 512MB granule
- * and assert that the value is the canonical DRAM_BASE PA of 0x8_0000_0000 for arm64.
- */
-
- uint64_t dram_base = gPhysBase & ~0x1FFFFFFFULL; /* 512MB */
- assert(dram_base == 0x800000000ULL);
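- /* Worked example (illustrative gPhysBase assumed): 512MB == 0x2000_0000,
- * so ~0x1FFFFFFFULL clears the low 29 bits; a gPhysBase of 0x8_0180_0000
- * truncates to dram_base == 0x8_0000_0000, which satisfies the assert.
- */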
-
-#if defined(KERNEL_INTEGRITY_KTRR)
- uint64_t soc_base = 0;
- DTEntry entryP = NULL;
- uintptr_t *reg_prop = NULL;
- uint32_t prop_size = 0;
- int rc;
-
- soc_base = pe_arm_get_soc_base_phys();
- rc = DTFindEntry("name", "mcc", &entryP);
- assert(rc == kSuccess);
- rc = DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size);
- assert(rc == kSuccess);
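- /* The "reg" property is an (offset, size) pair: reg_prop[0] is the AMCC
- * register block's offset from the SoC base and reg_prop[1] is its size,
- * as consumed by the ml_io_map() call below.
- */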
- amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1));
-#elif defined(KERNEL_INTEGRITY_CTRR)
- /* TODO: t8020 mcc entry not in device tree yet; we'll do it LIVE */
-#define TEMP_AMCC_BASE_PA 0x200000000ULL
-#define TEMP_AMCC_SZ 0x100000
- amcc_base = ml_io_map(TEMP_AMCC_BASE_PA, TEMP_AMCC_SZ);
-#else
-#error "KERNEL_INTEGRITY config error"
-#endif
-
-#if defined(KERNEL_INTEGRITY_KTRR)
- assert(rRORGNENDADDR > rRORGNBASEADDR);
- rorgn_begin = (rRORGNBASEADDR << AMCC_PGSHIFT) + dram_base;
- rorgn_end = (rRORGNENDADDR << AMCC_PGSHIFT) + dram_base;
-#elif defined(KERNEL_INTEGRITY_CTRR)
- rorgn_begin = rCTRR_AMCC_PLANE_REG(0, CTRR_A_BASEADDR);
- rorgn_end = rCTRR_AMCC_PLANE_REG(0, CTRR_A_ENDADDR);
- assert(rorgn_end > rorgn_begin);
-
- for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
- uint32_t begin = rCTRR_AMCC_PLANE_REG(i, CTRR_A_BASEADDR);
- uint32_t end = rCTRR_AMCC_PLANE_REG(i, CTRR_A_ENDADDR);
- if (!(begin == rorgn_begin && end == rorgn_end)) {
-#if DEVELOPMENT || DEBUG
- panic("iboot programmed CTRR bounds are inconsistent");
-#else
- panic("Inconsistent memory configuration");
-#endif
- }
- }
-
- // convert from page numbers relative to DRAM base to physical addresses
- rorgn_begin = (rorgn_begin << AMCC_PGSHIFT) + dram_base;
- rorgn_end = (rorgn_end << AMCC_PGSHIFT) + dram_base;
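- /* Worked example (assuming AMCC_PGSHIFT == 14, i.e. 16KB pages): a begin
- * page number of 0x100 yields rorgn_begin == dram_base + (0x100 << 14) ==
- * 0x8_0040_0000.
- */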
-
-#else
-#error KERNEL_INTEGRITY config error
-#endif /* defined(KERNEL_INTEGRITY_KTRR) */
-}
-
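-/*
- * Sanity check before lockdown: neither the MMU-side KTRR/CTRR lock
- * register nor the AMCC-side RO region lock may already be set.
- */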
-static void
-assert_unlocked(void)
-{
- uint64_t ktrr_lock = 0;
- uint32_t rorgn_lock = 0;
-
- assert(amcc_base);
-#if defined(KERNEL_INTEGRITY_KTRR)
- rorgn_lock = rRORGNLOCK;
- ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1);
-#elif defined(KERNEL_INTEGRITY_CTRR)
- for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
- rorgn_lock |= rCTRR_AMCC_PLANE_REG(i, CTRR_A_LOCK);
- }
- ktrr_lock = __builtin_arm_rsr64(ARM64_REG_CTRR_LOCK_EL1);
-#else
-#error KERNEL_INTEGRITY config error
-#endif /* defined(KERNEL_INTEGRITY_KTRR) */
-
- assert(!ktrr_lock);
- assert(!rorgn_lock);
-}
-
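-/*
- * Lock the AMCC-side protection: KTRR sets the RO region lock register,
- * while CTRR enables and locks each memory plane.
- */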
-static void
-lock_amcc(void)
-{
-#if defined(KERNEL_INTEGRITY_KTRR)
- rRORGNLOCK = 1;
- __builtin_arm_isb(ISB_SY);
-#elif defined(KERNEL_INTEGRITY_CTRR)
- /* Lock down planes in reverse order, as plane 0 must be locked last. */
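- /* For example, with CTRR_AMCC_MAX_PLANES == 4 (an assumed value), the
- * loop below enables and locks planes 3, 2, 1, and finally plane 0. */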
- for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
- rCTRR_AMCC_PLANE_REG(CTRR_AMCC_MAX_PLANES - i - 1, CTRR_A_ENABLE) = 1;
- rCTRR_AMCC_PLANE_REG(CTRR_AMCC_MAX_PLANES - i - 1, CTRR_A_LOCK) = 1;
- __builtin_arm_isb(ISB_SY);
- }
-#else
-#error KERNEL_INTEGRITY config error
-#endif
-}
-
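-/*
- * Program the MMU-side KTRR/CTRR bounds and lock the configuration.
- */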
-static void
-lock_mmu(uint64_t begin, uint64_t end)
-{
-#if defined(KERNEL_INTEGRITY_KTRR)
-
- __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
- __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
- __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);
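- /* once the lock register is written, the bounds above can no longer
- * be modified until the next reset */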
-
- /* flush TLB */
-
- __builtin_arm_isb(ISB_SY);
- flush_mmu_tlb();
-
-#elif defined (KERNEL_INTEGRITY_CTRR)
- /* This locks the entire bootstrap cluster. Non-bootstrap clusters
- * are locked by their respective cluster masters in start.s. */
-
- __builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
- __builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);
-
-#if !defined(APPLEVORTEX)
- /* H12 changed the sequence; the TLB must be invalidated immediately after setting the CTRR bounds */
- __builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
- flush_mmu_tlb();
-#endif /* !defined(APPLEVORTEX) */
-
- __builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
- __builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);
-
- uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
- if (current_el == PSR64_MODE_EL2) {
- // CTRR v2 has explicit registers for cluster config; they can only be written at EL2
-
- __builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
- __builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
- __builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
- __builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
- }
-
- __builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
-#if defined(APPLEVORTEX)
- flush_mmu_tlb();
-#endif /* defined(APPLEVORTEX) */
-
-#else /* defined(KERNEL_INTEGRITY_KTRR) */
-#error KERNEL_INTEGRITY config error
-#endif /* defined(KERNEL_INTEGRITY_KTRR) */
-}
-
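-/*
- * The AMCC-attached cache must be disabled (zero ways powered on) so the
- * cache clean performed before lockdown is reliable.
- */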
-static void
-assert_amcc_cache_disabled(void)
-{
-#if defined(KERNEL_INTEGRITY_KTRR)
- assert((rMCCGEN & 1) == 0); /* assert the memory cache (M$) is disabled, or the LLC clean will be unreliable */
-#elif defined(KERNEL_INTEGRITY_CTRR) && (defined(ARM64_BOARD_CONFIG_T8006))
- /*
- * T8006 differentiates between data and tag ways being powered up, so
- * make sure to check that both are zero on its single memory plane.
- */
- assert((rCTRR_AMCC_PLANE_REG(0, CTRR_AMCC_PWRONWAYCNTSTATUS) &
- (AMCC_CURTAGWAYCNT_MASK | AMCC_CURDATWAYCNT_MASK)) == 0);
-#elif defined (KERNEL_INTEGRITY_CTRR)
- for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
- assert(rCTRR_AMCC_PLANE_REG(i, CTRR_AMCC_WAYONCNT) == 0);
- }
-#else
-#error KERNEL_INTEGRITY config error
-#endif
-}
-
-/*
- * void rorgn_lockdown(void)
- *
- * Lock the MMU and AMCC RO region within the lower and upper boundaries, if not already locked.
- *
- * [ ] - ensure this is called ASAP on secondary CPUs; KTRR programming and lockdown are handled in
- * start.s:start_cpu() for subsequent wake/resume of all cores
- */
-void
-rorgn_lockdown(void)
-{
- vm_offset_t ktrr_begin, ktrr_end;
- unsigned long last_segsz;
-
-#if DEVELOPMENT || DEBUG
- boolean_t ktrr_disable = FALSE;
-
- PE_parse_boot_argn("-unsafe_kernel_text", &ktrr_disable, sizeof(ktrr_disable));
-
- if (ktrr_disable) {
- /*
- * Take the early out if the boot-arg is present. Since the amcc DT
- * entry may not be present, we can't assert that iboot hasn't
- * programmed the RO region lockdown registers.
- */
- goto out;
- }
-#endif /* DEVELOPMENT || DEBUG */
-
- assert_unlocked();
-
- /* [x] - Use the final method of determining the kernel text range, or expect crashes */
- ktrr_begin = segLOWEST;
- assert(ktrr_begin && gVirtBase && gPhysBase);
-
- ktrr_begin = kvtophys(ktrr_begin);
-
- ktrr_end = kvtophys(segLASTB);
- last_segsz = segSizeLAST;
-#if defined(KERNEL_INTEGRITY_KTRR)
- /* __LAST is not part of the MMU KTRR region (it is, however, part of the AMCC KTRR region) */
- ktrr_end = (ktrr_end - 1) & ~AMCC_PGMASK;
- /* ensure that iboot and xnu agree on the ktrr range */
- assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz));
- /* assert that __LAST segment containing privileged insns is only a single page */
- assert(last_segsz == PAGE_SIZE);
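- /* Worked example (addresses assumed): with 16KB pages, if
- * kvtophys(segLASTB) == 0x8_0123_4000, the MMU KTRR end rounds down to
- * 0x8_0123_0000, and the AMCC end checked above is that value plus
- * PAGE_SIZE, i.e. 0x8_0123_4000, so the AMCC region covers __LAST. */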
-#elif defined(KERNEL_INTEGRITY_CTRR)
- ktrr_end = (ktrr_end + last_segsz - 1) & ~AMCC_PGMASK;
- /* __LAST is part of the MMU CTRR region. We can't use the KTRR-style
- * method of making __pinst non-executable, because PXN applies with the
- * MMU off under CTRR. */
- assert(rorgn_begin == ktrr_begin && rorgn_end == ktrr_end);
-#endif
-
-
-#if DEBUG || DEVELOPMENT
- printf("KTRR Begin: %p End: %p, setting lockdown\n", (void *)ktrr_begin, (void *)ktrr_end);
-#endif
-
- /* [x] - ensure all in-flight writes are flushed to the AMCC before enabling the RO region lock */
-
- assert_amcc_cache_disabled();
-
- CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin),
- (unsigned)((ktrr_end + last_segsz) - ktrr_begin + AMCC_PGMASK));
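- /* The clean covers [ktrr_begin, ktrr_end + last_segsz), padded by
- * AMCC_PGMASK to be conservative, so the whole AMCC-protected range,
- * including __LAST, reaches the point of coherency before locking. */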
-
- lock_amcc();
-
- lock_mmu(ktrr_begin, ktrr_end);
-
-#if DEVELOPMENT || DEBUG
-out:
-#endif
-
-#if defined(KERNEL_INTEGRITY_CTRR)
- {
- /* wake any threads blocked on cluster master lockdown */
- cpu_data_t *cdp;
- uint64_t mpidr_el1_value;
-
- cdp = getCpuDatap();
- MRS(mpidr_el1_value, "MPIDR_EL1");
- cdp->cpu_cluster_id = (mpidr_el1_value & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT;
- assert(cdp->cpu_cluster_id < __ARM_CLUSTER_COUNT__);
- ctrr_cluster_locked[cdp->cpu_cluster_id] = 1;
- thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
- }
-#endif
- /* now we can run the lockdown handler */
- ml_lockdown_run_handler();
-}
-
-#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
-