-#if defined(KERNEL_INTEGRITY_CTRR)
-	/*
-	 * Program and lock CTRR if this CPU is a non-boot-cluster master; the boot
-	 * cluster is locked later, in machine_lockdown(). The pinst instructions are
-	 * protected by VMSA_LOCK. The A_PXN and A_MMUON_WRPROTECT options provide
-	 * something close to KTRR behavior.
-	 */
-
- /* refuse to boot if machine_lockdown() hasn't completed */
- adrp x17, EXT(lockdown_done)@page
- ldr w17, [x17, EXT(lockdown_done)@pageoff]
- cbz w17, .
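-	/* 'cbz w17, .' branches to itself; w17 is never reloaded, so a core that
-	 * arrives here before lockdown_done is set parks permanently */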
-
-	// load stashed rorgn_begin (stored in the ctrr_begin variable)
- adrp x17, EXT(ctrr_begin)@page
- add x17, x17, EXT(ctrr_begin)@pageoff
- ldr x17, [x17]
-#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
-	// a zero rorgn_begin means we're debugging; skip enabling CTRR
- cbz x17, Lskip_ctrr
-#else
- cbz x17, .
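-	// on RELEASE builds a missing rorgn_begin is fatal: park the core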
-#endif
-
-	// load stashed rorgn_end (stored in the ctrr_end variable)
- adrp x19, EXT(ctrr_end)@page
- add x19, x19, EXT(ctrr_end)@pageoff
- ldr x19, [x19]
-#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
- cbz x19, Lskip_ctrr
-#else
- cbz x19, .
-#endif
-
- mrs x18, ARM64_REG_CTRR_LOCK_EL1
- cbnz x18, Lskip_ctrr /* don't touch if already locked */
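-	/* program CTRR region A with the stashed read-only region bounds:
-	 * A_LWR = rorgn_begin (x17), A_UPR = rorgn_end (x19) */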
- msr ARM64_REG_CTRR_A_LWR_EL1, x17
- msr ARM64_REG_CTRR_A_UPR_EL1, x19
- mov x18, #(CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT)
- msr ARM64_REG_CTRR_CTL_EL1, x18
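-	// (msr to a system register takes only a GPR source, so the control
-	// bits are assembled in x18 first)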
- mov x18, #1
- msr ARM64_REG_CTRR_LOCK_EL1, x18
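-	// once LOCK_EL1 is set, software can no longer modify the CTRR bounds
-	// or control bits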
-
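-	/* isb completes the msr writes above; tlbi vmalle1 drops any EL1 TLB
-	 * entries cached with pre-CTRR permissions; dsb ish waits for the
-	 * invalidate to finish; the final isb resynchronizes the pipeline */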
- isb
- tlbi vmalle1
- dsb ish
- isb
-Lspin_ctrr_unlocked:
-	/* we should never spin here: cpu_start() serializes CPU start by cluster,
-	 * and the first core started in a cluster is designated cluster master and
-	 * locks both the core and the cluster. Subsequent cores in the same cluster
-	 * run locked from the reset vector. */
- mrs x18, ARM64_REG_CTRR_LOCK_EL1
- cbz x18, Lspin_ctrr_unlocked
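-	// defensive check: if the lock somehow failed to latch, spin here rather
-	// than continue with CTRR unprotected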
-Lskip_ctrr:
-#endif