+/*
+ * void set_bp_ret(void)
+ * Helper function to enable branch predictor state retention
+ * across ACC sleep
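+ *
+ * Reads the bp_ret global (populated from the bpret boot-arg) and writes
+ * its value into the bpSlp field of ACC_CFG; clobbers only x13 and x14.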
+ */
+
+ .align 2
+ .globl EXT(set_bp_ret)
+LEXT(set_bp_ret)
+ // Load bpret boot-arg
+ adrp x14, EXT(bp_ret)@page
+ add x14, x14, EXT(bp_ret)@pageoff
+ ldr w14, [x14]
+
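+ // Read-modify-write ACC_CFG: clear the current bpSlp field, then insert
+ // the masked boot-arg value at the bpSlp shift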
+ mrs x13, ARM64_REG_ACC_CFG
+ and x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
+ and x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
+ orr x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
+ msr ARM64_REG_ACC_CFG, x13
+
+ ret
+#endif // HAS_BP_RET
+
+#if HAS_NEX_PG
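+
+/*
+ * void set_nex_pg(void)
+ * Helper function to enable or disable NEX powergating
+ * based on the nexpg boot-arg (p-cores only)
+ */
+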
+ .align 2
+ .globl EXT(set_nex_pg)
+LEXT(set_nex_pg)
+ mrs x14, MPIDR_EL1
+ // Skip if this isn't a p-core; NEX powergating isn't available for e-cores
+ and x14, x14, #(MPIDR_PNE)
+ cbz x14, Lnex_pg_done
+
+ // Set the SEG-recommended value of 12 additional reset cycles
+ mrs x14, ARM64_REG_HID13
+ and x14, x14, (~ARM64_REG_HID13_RstCyc_mask)
+ orr x14, x14, ARM64_REG_HID13_RstCyc_val
+ msr ARM64_REG_HID13, x14
+
+ // Load nexpg boot-arg
+ adrp x14, EXT(nex_pg)@page
+ add x14, x14, EXT(nex_pg)@pageoff
+ ldr w14, [x14]
+
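+ // Clear HID14.NexPwgEn, then set it again only if the boot-arg is non-zero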
+ mrs x13, ARM64_REG_HID14
+ and x13, x13, (~ARM64_REG_HID14_NexPwgEn)
+ cbz w14, Lset_nex_pg
+ orr x13, x13, ARM64_REG_HID14_NexPwgEn
+Lset_nex_pg:
+ msr ARM64_REG_HID14, x13
+
+Lnex_pg_done:
+ ret
+
+#endif // HAS_NEX_PG