#endif /* defined(HAS_APPLE_PAC) */
+#if HAS_BP_RET
+/*
+ * void set_bp_ret(void)
+ * Helper function to enable branch predictor state retention
+ * across ACC sleep
+ */
+
+ .align 2
+ .globl EXT(set_bp_ret)
+LEXT(set_bp_ret)
+ // Load bpret boot-arg
+ adrp x14, EXT(bp_ret)@page
+ add x14, x14, EXT(bp_ret)@pageoff
+ ldr w14, [x14]
+
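+ // Insert the boot-arg value into the ACC_CFG bpSlp field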
+ mrs x13, ARM64_REG_ACC_CFG
+ and x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
+ and x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
+ orr x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
+ msr ARM64_REG_ACC_CFG, x13
+
+ ret
+#endif // HAS_BP_RET
+
+#if HAS_NEX_PG
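+/*
+ * void set_nex_pg(void)
+ * Helper function to enable or disable NEX powergating
+ * based on the nex_pg boot-arg (p-cores only)
+ */
+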
+ .align 2
+ .globl EXT(set_nex_pg)
+LEXT(set_nex_pg)
+ mrs x14, MPIDR_EL1
+ // Skip if this isn't a p-core; NEX powergating isn't available for e-cores
+ and x14, x14, #(MPIDR_PNE)
+ cbz x14, Lnex_pg_done
+
+ // Set the SEG-recommended value of 12 additional reset cycles
+ mrs x14, ARM64_REG_HID13
+ and x14, x14, (~ARM64_REG_HID13_RstCyc_mask)
+ orr x14, x14, ARM64_REG_HID13_RstCyc_val
+ msr ARM64_REG_HID13, x14
+
+ // Load nexpg boot-arg
+ adrp x14, EXT(nex_pg)@page
+ add x14, x14, EXT(nex_pg)@pageoff
+ ldr w14, [x14]
+
+ mrs x13, ARM64_REG_HID14
+ and x13, x13, (~ARM64_REG_HID14_NexPwgEn)
+ cbz w14, Lset_nex_pg // If the nex_pg boot-arg is 0, leave NexPwgEn cleared
+ orr x13, x13, ARM64_REG_HID14_NexPwgEn
+Lset_nex_pg:
+ msr ARM64_REG_HID14, x13
+
+Lnex_pg_done:
+ ret
+
+#endif // HAS_NEX_PG
/* uint32_t get_fpscr(void):
* Returns (FPSR | FPCR).
bl EXT(pinst_set_ttbr1)
mov lr, x1
#else
+#if defined(HAS_VMSA_LOCK)
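+ // assert TTBR1 unlocked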
+ mrs x1, ARM64_REG_VMSA_LOCK_EL1
+ and x1, x1, #(VMSA_LOCK_TTBR1_EL1)
+ cbnz x1, L_set_locked_reg_panic
+#endif /* defined(HAS_VMSA_LOCK) */
msr TTBR1_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
isb sy
ret
+#if XNU_MONITOR
+ .section __PPLTEXT,__text,regular,pure_instructions
+#else
.text
+#endif
.align 2
.globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
#endif
#endif /* __ARM_KERNEL_PROTECT__ */
+#if defined(HAS_VMSA_LOCK)
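+/*
+ * void vmsa_lock(void)
+ * Locks SCTLR.M, TTBR1_EL1, TCR_EL1, and VBAR_EL1 against further
+ * modification via ARM64_REG_VMSA_LOCK_EL1
+ */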
+ .text
+ .align 2
+ .globl EXT(vmsa_lock)
+LEXT(vmsa_lock)
+ isb sy
+ mov x1, #(VMSA_LOCK_SCTLR_M_BIT)
+ mov x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1)
+ orr x0, x0, x1
+ msr ARM64_REG_VMSA_LOCK_EL1, x0
+ isb sy
+ ret
+#endif /* defined(HAS_VMSA_LOCK) */
/*
* set translation control register
bl EXT(pinst_set_tcr)
mov lr, x1
#else
+#if defined(HAS_VMSA_LOCK)
+ // assert TCR unlocked
+ mrs x1, ARM64_REG_VMSA_LOCK_EL1
+ and x1, x1, #(VMSA_LOCK_TCR_EL1)
+ cbnz x1, L_set_locked_reg_panic
+#endif /* defined(HAS_VMSA_LOCK) */
msr TCR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
isb sy
L_set_locked_reg_panic_str:
.asciz "attempt to set locked register: (%llx)\n"
#else
-#if defined(KERNEL_INTEGRITY_KTRR)
+#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
mov x1, lr
bl EXT(pinst_set_tcr)
mov lr, x1
/*
* SET_RECOVERY_HANDLER
*
- * Sets up a page fault recovery handler
+ * Sets up a page fault recovery handler. This macro clobbers x16 and x17.
*
- * arg0 - persisted thread pointer
- * arg1 - persisted recovery handler
- * arg2 - scratch reg
- * arg3 - recovery label
+ * label - recovery label
+ * tpidr - persisted thread pointer
+ * old_handler - persisted recovery handler
+ * label_in_adr_range - whether \label is within 1 MB of the PC (allowing a single adr rather than adrp/add)
*/
-.macro SET_RECOVERY_HANDLER
- mrs $0, TPIDR_EL1 // Load thread pointer
- adrp $2, $3@page // Load the recovery handler address
- add $2, $2, $3@pageoff
+.macro SET_RECOVERY_HANDLER label, tpidr=x16, old_handler=x10, label_in_adr_range=0
+ // Note: x16 and x17 are designated for use as temporaries in
+ // interruptible PAC routines. DO NOT CHANGE THESE REGISTER ASSIGNMENTS.
+.if \label_in_adr_range==1 // Load the recovery handler address
+ adr x17, \label
+.else
+ adrp x17, \label@page
+ add x17, x17, \label@pageoff
+.endif
#if defined(HAS_APPLE_PAC)
- add $1, $0, TH_RECOVER
- movk $1, #PAC_DISCRIMINATOR_RECOVER, lsl 48
- pacia $2, $1 // Sign with IAKey + blended discriminator
+ mrs x16, TPIDR_EL1
+ add x16, x16, TH_RECOVER
+ movk x16, #PAC_DISCRIMINATOR_RECOVER, lsl 48
+ pacia x17, x16 // Sign with IAKey + blended discriminator
#endif
- ldr $1, [$0, TH_RECOVER] // Save previous recovery handler
- str $2, [$0, TH_RECOVER] // Set new signed recovery handler
+ mrs \tpidr, TPIDR_EL1 // Load thread pointer
+ ldr \old_handler, [\tpidr, TH_RECOVER] // Save previous recovery handler
+ str x17, [\tpidr, TH_RECOVER] // Set new signed recovery handler
.endmacro
/*
*
* Clears page fault handler set by SET_RECOVERY_HANDLER
*
- * arg0 - thread pointer saved by SET_RECOVERY_HANDLER
- * arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
+ * tpidr - thread pointer saved by SET_RECOVERY_HANDLER
+ * old_handler - old recovery handler saved by SET_RECOVERY_HANDLER
*/
-.macro CLEAR_RECOVERY_HANDLER
- str $1, [$0, TH_RECOVER] // Restore the previous recovery handler
+.macro CLEAR_RECOVERY_HANDLER tpidr=x16, old_handler=x10
+ str \old_handler, [\tpidr, TH_RECOVER] // Restore the previous recovery handler
.endmacro
.text
.align 2
copyio_error:
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
mov x0, #EFAULT // Return an EFAULT error
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_bcopyin)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
/* If len is less than 16 bytes, just do a bytewise copy */
cmp x2, #16
b.lt 2f
strb w3, [x1], #1
b.hi 2b
3:
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
mov x0, #0
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_copyin_atomic32)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
ldr w8, [x0]
str w8, [x1]
mov x0, #0
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_copyin_atomic32_wait_if_equals)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
ldxr w8, [x0]
cmp w8, w1
mov x0, ESTALE
wfe
1:
clrex
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_copyin_atomic64)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
ldr x8, [x0]
str x8, [x1]
mov x0, #0
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_copyout_atomic32)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
str w0, [x1]
mov x0, #0
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_copyout_atomic64)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
str x0, [x1]
mov x0, #0
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_bcopyout)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
/* If len is less than 16 bytes, just do a bytewise copy */
cmp x2, #16
b.lt 2f
strb w3, [x1], #1
b.hi 2b
3:
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
mov x0, #0
POP_FRAME
ARM64_STACK_EPILOG
LEXT(_bcopyinstr)
ARM64_STACK_PROLOG
PUSH_FRAME
- adr x4, Lcopyinstr_error // Get address for recover
- mrs x10, TPIDR_EL1 // Get thread pointer
- ldr x11, [x10, TH_RECOVER] // Save previous recover
-
-#if defined(HAS_APPLE_PAC)
- add x5, x10, TH_RECOVER // Sign new pointer with IAKey + blended discriminator
- movk x5, #PAC_DISCRIMINATOR_RECOVER, lsl 48
- pacia x4, x5
-#endif
- str x4, [x10, TH_RECOVER] // Store new recover
-
+ SET_RECOVERY_HANDLER Lcopyinstr_error, label_in_adr_range=1
mov x4, #0 // x4 - total bytes copied
Lcopyinstr_loop:
ldrb w5, [x0], #1 // Load a byte from the user source
Lcopyinstr_error:
mov x0, #EFAULT // Return EFAULT on error
Lcopyinstr_exit:
- str x11, [x10, TH_RECOVER] // Restore old recover
+ CLEAR_RECOVERY_HANDLER
POP_FRAME
ARM64_STACK_EPILOG
* x3 : temp
* x5 : temp (kernel virtual base)
* x9 : temp
- * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
- * x11 : old recovery function (set by SET_RECOVERY_HANDLER)
+ * x10 : old recovery function (set by SET_RECOVERY_HANDLER)
* x12, x13 : backtrace data
+ * x16 : thread pointer (set by SET_RECOVERY_HANDLER)
*
*/
.text
LEXT(copyinframe)
ARM64_STACK_PROLOG
PUSH_FRAME
- SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
+ SET_RECOVERY_HANDLER copyio_error
cbnz w2, Lcopyinframe64 // Check frame size
adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
add x5, x5, EXT(gVirtBase)@pageoff
mov w0, #0 // Success
Lcopyinframe_done:
- CLEAR_RECOVERY_HANDLER x10, x11
+ CLEAR_RECOVERY_HANDLER
POP_FRAME
ARM64_STACK_EPILOG
orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
+#if HAS_RETENTION_STATE
+ orr x1, x1, #(ARM64_REG_ACC_OVRD_disPioOnWfiCpu)
+#endif
msr ARM64_REG_ACC_OVRD, x1
// Set "OK to power down" (<rdar://problem/12390433>)
mrs x0, ARM64_REG_CYC_OVRD
orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
+#if HAS_RETENTION_STATE
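+ // Disable retention state on WFI before entering sleep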
+ orr x0, x0, #(ARM64_REG_CYC_OVRD_disWfiRetn)
+#endif
msr ARM64_REG_CYC_OVRD, x0
-#if defined(APPLEMONSOON)
+#if defined(APPLEMONSOON) || defined(APPLEVORTEX)
ARM64_IS_PCORE x0
cbz x0, Lwfi_inst // skip if not p-core
* and re-enabling GUPS, which forces the prefetch queue to
* drain. This should be done as close to wfi as possible, i.e.
* at the very end of arm64_prepare_for_sleep(). */
+#if defined(APPLEVORTEX)
+ /* <rdar://problem/32821461>: Cyprus A0/A1 parts have a similar
+ * bug in the HSP prefetcher that can be worked around through
+ * the same method mentioned above for Skye. */
+ SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x0, VORTEX_CPU_VERSION_B0, Lwfi_inst
+#endif
mrs x0, ARM64_REG_HID10
orr x0, x0, #(ARM64_REG_HID10_DisHwpGups)
msr ARM64_REG_HID10, x0
ARM64_STACK_EPILOG
+#if HAS_RETENTION_STATE
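+/*
+ * void arm64_retention_wfi(void)
+ * Executes WFI; if the core wakes having entered retention state
+ * (indicated by lr == 0), takes the idle exit path via cpu_idle_exit()
+ */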
+ .text
+ .align 2
+ .globl EXT(arm64_retention_wfi)
+LEXT(arm64_retention_wfi)
+ wfi
+ cbz lr, Lwfi_retention // If lr is 0, we entered retention state and lost all GPRs except sp and pc
+ ret // Otherwise just return to cpu_idle()
+Lwfi_retention:
+ mov x0, #1
+ bl EXT(ClearIdlePop)
+ mov x0, #0
+ bl EXT(cpu_idle_exit) // cpu_idle_exit(from_reset = FALSE)
+ b . // cpu_idle_exit() should never return
+#endif
#if defined(APPLETYPHOON)
mrs x4, DAIF // Load current DAIF; use x4 as pinst may trash x1-x3
msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) // Disable IRQ/FIQ/serror
// Set SP_EL1 to exception stack
-#if defined(KERNEL_INTEGRITY_KTRR)
+#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
mov x1, lr
bl EXT(pinst_spsel_1)
mov lr, x1
CALL_EXTERN panic_with_thread_kernel_state
Lcheck_hash_str:
.asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"
+
+/**
+ * void ml_auth_thread_state_invalid_cpsr(arm_saved_state_t *ss)
+ *
+ * Panics due to an invalid CPSR value in ss.
+ */
+ .text
+ .align 2
+ .globl EXT(ml_auth_thread_state_invalid_cpsr)
+LEXT(ml_auth_thread_state_invalid_cpsr)
+ ARM64_STACK_PROLOG
+ PUSH_FRAME
+ mov x1, x0
+ adr x0, Linvalid_cpsr_str
+ CALL_EXTERN panic_with_thread_kernel_state
+
+Linvalid_cpsr_str:
+ .asciz "Thread state corruption detected (PE mode == 0)"
#endif /* HAS_APPLE_PAC */
.text