+/* Returns true unless inter-processor signaling (SIGP) is disabled for the calling CPU. */
+bool
+ml_cpu_signal_is_enabled(void)
+{
+ return !(getCpuDatap()->cpu_signal & SIGPdisabled);
+}
+
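+/*
+ * Returns true if the given CPU may be shut down via processor_exit(),
+ * i.e. while the system is quiescing for S2R, or for any non-boot CPU
+ * on hardware that supports powering down individual cores.
+ */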
+bool
+ml_cpu_can_exit(__unused int cpu_id)
+{
+ /* processor_exit() is always allowed on the S2R path */
+ if (ml_is_quiescing()) {
+ return true;
+ }
+#if HAS_CLUSTER && USE_APPLEARMSMP
+ /*
+ * Cyprus and newer chips can disable individual non-boot CPUs. The
+ * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips.
+ */
+ if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) {
+ return true;
+ }
+#endif
+ return false;
+}
+
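+/* One-time boot initialization of the lock group and rw lock guarding CPU state. */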
+void
+ml_cpu_init_state(void)
+{
+ lck_grp_init(&cpu_lck_grp, "cpu_lck_grp", LCK_GRP_ATTR_NULL);
+ lck_rw_init(&cpu_state_lock, &cpu_lck_grp, LCK_ATTR_NULL);
+}
+
+#ifdef USE_APPLEARMSMP
+
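+/*
+ * Marks the given CPU as mid-transition (coming online or going offline).
+ * The exclusive lock serializes this update against shared holders in
+ * ml_cpu_begin_loop().
+ */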
+void
+ml_cpu_begin_state_transition(int cpu_id)
+{
+ lck_rw_lock_exclusive(&cpu_state_lock);
+ CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true;
+ lck_rw_unlock_exclusive(&cpu_state_lock);
+}
+
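+/* Clears the in-transition flag once the CPU state change has completed. */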
+void
+ml_cpu_end_state_transition(int cpu_id)
+{
+ lck_rw_lock_exclusive(&cpu_state_lock);
+ CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false;
+ lck_rw_unlock_exclusive(&cpu_state_lock);
+}
+
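+/*
+ * Takes the CPU state lock shared, so a caller iterating over CPUs does not
+ * observe a CPU in the middle of a state transition. A sketch of the
+ * expected (hypothetical) pairing:
+ *
+ *   ml_cpu_begin_loop();
+ *   ... walk CpuDataEntries ...
+ *   ml_cpu_end_loop();
+ */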
+void
+ml_cpu_begin_loop(void)
+{
+ lck_rw_lock_shared(&cpu_state_lock);
+}
+
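+/* Releases the shared lock taken by ml_cpu_begin_loop(). */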
+void
+ml_cpu_end_loop(void)
+{
+ lck_rw_unlock_shared(&cpu_state_lock);
+}
+
+#else /* USE_APPLEARMSMP */
+
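+/*
+ * Without USE_APPLEARMSMP, CPUs do not change state at runtime, so the
+ * state-transition hooks are no-ops.
+ */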
+void
+ml_cpu_begin_state_transition(__unused int cpu_id)
+{
+}
+
+void
+ml_cpu_end_state_transition(__unused int cpu_id)
+{
+}
+
+void
+ml_cpu_begin_loop(void)
+{
+}
+
+void
+ml_cpu_end_loop(void)
+{
+}
+
+#endif /* USE_APPLEARMSMP */