apple/xnu: osfmk/arm/cpu_common.c (xnu-7195.60.75)

diff --git a/osfmk/arm/cpu_common.c b/osfmk/arm/cpu_common.c
index d976ce5c108898cd279d04a2633098e9e925b4af..11ad96d9e0f6948f04012a8fc54b82ed4c28cc2d 100644
--- a/osfmk/arm/cpu_common.c
+++ b/osfmk/arm/cpu_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  *     cpu routines common to all supported arm variants
  */
 
-#include <kern/kalloc.h>
 #include <kern/machine.h>
 #include <kern/cpu_number.h>
 #include <kern/thread.h>
+#include <kern/percpu.h>
 #include <kern/timer_queue.h>
+#include <kern/locks.h>
 #include <arm/cpu_data.h>
 #include <arm/cpuid.h>
 #include <arm/caches_internal.h>
 #include <pexpert/device_tree.h>
 #include <sys/kdebug.h>
 #include <arm/machine_routines.h>
+#include <arm/proc_reg.h>
 #include <libkern/OSAtomic.h>
 
-#if KPERF
-void kperf_signal_handler(unsigned int cpu_number);
-#endif
+SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base;
+vm_address_t     percpu_base_cur;
+cpu_data_t       PERCPU_DATA(cpu_data);
+cpu_data_entry_t CpuDataEntries[MAX_CPUS];
 
-struct processor BootProcessor;
+static lck_grp_t cpu_lck_grp;
+static lck_rw_t cpu_state_lock;
 
-unsigned int   real_ncpus = 1;
-boolean_t      idle_enable = FALSE;
-uint64_t       wake_abstime=0x0ULL;
+unsigned int    real_ncpus = 1;
+boolean_t       idle_enable = FALSE;
+uint64_t        wake_abstime = 0x0ULL;
 
+#if defined(HAS_IPI)
+extern unsigned int gFastIPI;
+#endif /* defined(HAS_IPI) */
 
 cpu_data_t *
 cpu_datap(int cpu)
 {
-       assert(cpu < MAX_CPUS);
-       return (CpuDataEntries[cpu].cpu_data_vaddr);
+       assert(cpu <= ml_get_max_cpu_number());
+       return CpuDataEntries[cpu].cpu_data_vaddr;
 }
 
 kern_return_t
 cpu_control(int slot_num,
-           processor_info_t info,
-           unsigned int count)
+    processor_info_t info,
+    unsigned int count)
 {
        printf("cpu_control(%d,%p,%d) not implemented\n",
-              slot_num, info, count);
-       return (KERN_FAILURE);
+           slot_num, info, count);
+       return KERN_FAILURE;
 }
 
 kern_return_t
 cpu_info_count(processor_flavor_t flavor,
-              unsigned int *count)
+    unsigned int *count)
 {
-
        switch (flavor) {
        case PROCESSOR_CPU_STAT:
                *count = PROCESSOR_CPU_STAT_COUNT;
-               return (KERN_SUCCESS);
+               return KERN_SUCCESS;
+
+       case PROCESSOR_CPU_STAT64:
+               *count = PROCESSOR_CPU_STAT64_COUNT;
+               return KERN_SUCCESS;
 
        default:
                *count = 0;
-               return (KERN_FAILURE);
+               return KERN_FAILURE;
        }
 }
 
 kern_return_t
-cpu_info(processor_flavor_t flavor,
-        int slot_num,
-        processor_info_t info,
-        unsigned int *count)
+cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
+    unsigned int *count)
 {
+       cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;
+
        switch (flavor) {
        case PROCESSOR_CPU_STAT:
-               {
-                       processor_cpu_stat_t cpu_stat;
-                       cpu_data_t     *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;
-
-                       if (*count < PROCESSOR_CPU_STAT_COUNT)
-                               return (KERN_FAILURE);
-
-                       cpu_stat = (processor_cpu_stat_t) info;
-                       cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
-                       cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
-                       cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
-                       cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
-                       cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
-                       cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
-                       cpu_stat->vfp_shortv_cnt = 0;
-                       cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
-                       cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
-
-                       *count = PROCESSOR_CPU_STAT_COUNT;
-
-                       return (KERN_SUCCESS);
+       {
+               if (*count < PROCESSOR_CPU_STAT_COUNT) {
+                       return KERN_FAILURE;
+               }
+
+               processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
+               cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
+               cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
+               cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
+               cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
+               cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
+               cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
+               cpu_stat->vfp_shortv_cnt = 0;
+               cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
+               cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;
+
+               *count = PROCESSOR_CPU_STAT_COUNT;
+
+               return KERN_SUCCESS;
+       }
+
+       case PROCESSOR_CPU_STAT64:
+       {
+               if (*count < PROCESSOR_CPU_STAT64_COUNT) {
+                       return KERN_FAILURE;
                }
 
+               processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
+               cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
+               cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
+               cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
+               cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
+               cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
+               cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
+               cpu_stat->vfp_shortv_cnt = 0;
+               cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
+               cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
+#if MONOTONIC
+               cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
+#endif /* MONOTONIC */
+
+               *count = PROCESSOR_CPU_STAT64_COUNT;
+
+               return KERN_SUCCESS;
+       }
+
        default:
-               return (KERN_FAILURE);
+               return KERN_FAILURE;
        }
 }
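
The new PROCESSOR_CPU_STAT64 flavor exists because the per-CPU counters are kept as 64-bit values; the legacy flavor narrows each one to uint32_t on the way out, as the casts above show. Below is a minimal standalone sketch of the difference, using simplified stand-in structs rather than the real processor_cpu_stat layouts:

    /*
     * Standalone sketch (not kernel code; simplified structs) of why the
     * 64-bit stat flavor was added: the legacy flavor truncates the kernel's
     * 64-bit counters to uint32_t, so a counter past 2^32 wraps.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct stat32 { uint32_t irq_ex_cnt; };   /* stand-in for the old flavor */
    struct stat64 { uint64_t irq_ex_cnt; };   /* stand-in for the new flavor */

    int
    main(void)
    {
        uint64_t kernel_counter = 0x100000005ULL;   /* > 2^32 IRQs taken */

        struct stat32 s32 = { .irq_ex_cnt = (uint32_t)kernel_counter };
        struct stat64 s64 = { .irq_ex_cnt = kernel_counter };

        printf("32-bit flavor reports %u (wrapped)\n", s32.irq_ex_cnt);
        printf("64-bit flavor reports %llu\n",
            (unsigned long long)s64.irq_ex_cnt);
        return 0;
    }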
 
@@ -140,8 +173,8 @@ cpu_info(processor_flavor_t flavor,
  *     Function:
  */
 void
-cpu_doshutdown(void (*doshutdown) (processor_t),
-              processor_t processor)
+cpu_doshutdown(void (*doshutdown)(processor_t),
+    processor_t processor)
 {
        doshutdown(processor);
 }
@@ -153,15 +186,15 @@ cpu_doshutdown(void (*doshutdown) (processor_t),
 void
 cpu_idle_tickle(void)
 {
-       boolean_t       intr;
-       cpu_data_t      *cpu_data_ptr;
-       uint64_t        new_idle_timeout_ticks = 0x0ULL;
+       boolean_t       intr;
+       cpu_data_t      *cpu_data_ptr;
+       uint64_t        new_idle_timeout_ticks = 0x0ULL;
 
        intr = ml_set_interrupts_enabled(FALSE);
        cpu_data_ptr = getCpuDatap();
 
-       if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
-               ((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
+       if (cpu_data_ptr->idle_timer_notify != NULL) {
+               cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
                if (new_idle_timeout_ticks != 0x0ULL) {
                        /* if a new idle timeout was requested set the new idle timer deadline */
                        clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
@@ -177,36 +210,53 @@ cpu_idle_tickle(void)
 static void
 cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
 {
-       broadcastFunc   xfunc;
-       void            *xparam;
+       broadcastFunc   xfunc;
+       void            *xparam;
 
-       __c11_atomic_thread_fence(memory_order_acquire_smp);
+       os_atomic_thread_fence(acquire);
        /* Come back around if cpu_signal_internal is running on another CPU and has just
-        * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/
+       * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/
        if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
                xfunc = cpu_data_ptr->cpu_xcall_p0;
+               INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
                xparam = cpu_data_ptr->cpu_xcall_p1;
                cpu_data_ptr->cpu_xcall_p0 = NULL;
                cpu_data_ptr->cpu_xcall_p1 = NULL;
-               __c11_atomic_thread_fence(memory_order_acq_rel_smp);
-               hw_atomic_and_noret(&cpu_data_ptr->cpu_signal, ~SIGPxcall);
+               os_atomic_thread_fence(acq_rel);
+               os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
                xfunc(xparam);
+               INTERRUPT_MASKED_DEBUG_END();
+       }
+       if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
+               xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
+               INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
+               xparam = cpu_data_ptr->cpu_imm_xcall_p1;
+               cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
+               cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
+               os_atomic_thread_fence(acq_rel);
+               os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
+               xfunc(xparam);
+               INTERRUPT_MASKED_DEBUG_END();
        }
-
 }
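
The acquire fence at the top and the clearing of the pending bit only after the parameters are consumed form a small publish/consume protocol with cpu_signal_internal(): the sender sets SIGPxcall (or SIGPxcallImm) first and stores the function and parameter afterwards, so the handler simply comes back later if it sees the bit before the parameters. The following is a hedged standalone C11 model of that handoff; fake_cpu, send_xcall and handle_xcall are illustrative names, not kernel APIs, and the memory orders are chosen for the model rather than copied from the kernel:

    /*
     * Standalone C11 model of the xcall parameter handoff: the sender claims
     * the pending bit first and publishes (func, param) afterwards; the
     * handler backs off until both are visible, consumes them, and only then
     * releases the bit.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SIGPxcall 0x1u

    typedef void (*broadcast_fn)(void *);

    struct fake_cpu {
        _Atomic unsigned      signal;     /* pending-signal bits   */
        _Atomic(broadcast_fn) xcall_p0;   /* published function    */
        _Atomic(void *)       xcall_p1;   /* published argument    */
    };

    /* Sender side (models the SIGPxcall path of cpu_signal_internal). */
    static bool
    send_xcall(struct fake_cpu *cpu, broadcast_fn fn, void *arg)
    {
        unsigned cur = atomic_load_explicit(&cpu->signal, memory_order_relaxed);

        do {
            if (cur & SIGPxcall) {
                return false;             /* a call is already pending */
            }
        } while (!atomic_compare_exchange_weak_explicit(&cpu->signal, &cur,
            cur | SIGPxcall, memory_order_acq_rel, memory_order_acquire));

        /* Publish the parameters after claiming the bit; the handler
         * tolerates seeing the bit before the parameters arrive. */
        atomic_store_explicit(&cpu->xcall_p0, fn, memory_order_relaxed);
        atomic_store_explicit(&cpu->xcall_p1, arg, memory_order_release);
        return true;
    }

    /* Handler side (models cpu_handle_xcall running in IPI context). */
    static void
    handle_xcall(struct fake_cpu *cpu)
    {
        /* Acquire-load the argument so that, if it is visible, the function
         * pointer stored before it is visible too. */
        void        *arg = atomic_load_explicit(&cpu->xcall_p1, memory_order_acquire);
        broadcast_fn fn  = atomic_load_explicit(&cpu->xcall_p0, memory_order_relaxed);

        if (fn == NULL || arg == NULL) {
            return;                       /* params not published yet; retry later */
        }

        atomic_store_explicit(&cpu->xcall_p0, NULL, memory_order_relaxed);
        atomic_store_explicit(&cpu->xcall_p1, NULL, memory_order_relaxed);

        /* Release the slot only after the parameters are consumed. */
        atomic_fetch_and_explicit(&cpu->signal, ~SIGPxcall, memory_order_release);

        fn(arg);
    }

    static void
    say_hello(void *p)
    {
        printf("xcall ran with \"%s\"\n", (const char *)p);
    }

    int
    main(void)
    {
        struct fake_cpu cpu = { 0 };
        char msg[] = "hello";

        if (send_xcall(&cpu, say_hello, msg)) {
            handle_xcall(&cpu);
        }
        return 0;
    }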
 
-unsigned int
-cpu_broadcast_xcall(uint32_t *synch,
-                   boolean_t self_xcall,
-                   broadcastFunc func,
-                   void *parm)
+static unsigned int
+cpu_broadcast_xcall_internal(unsigned int signal,
+    uint32_t *synch,
+    boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm)
 {
-       boolean_t       intr;
-       cpu_data_t      *cpu_data_ptr;
-       cpu_data_t      *target_cpu_datap;
-       unsigned int    failsig;
-       int             cpu;
-       int             max_cpu;
+       boolean_t       intr;
+       cpu_data_t      *cpu_data_ptr;
+       cpu_data_t      *target_cpu_datap;
+       unsigned int    failsig;
+       int             cpu;
+       int             max_cpu = ml_get_max_cpu_number() + 1;
+
+       //yes, param ALSO cannot be NULL
+       assert(func);
+       assert(parm);
 
        intr = ml_set_interrupts_enabled(FALSE);
        cpu_data_ptr = getCpuDatap();
@@ -214,18 +264,19 @@ cpu_broadcast_xcall(uint32_t *synch,
        failsig = 0;
 
        if (synch != NULL) {
-               *synch = real_ncpus;
+               *synch = max_cpu;
                assert_wait((event_t)synch, THREAD_UNINT);
        }
 
-       max_cpu = ml_get_max_cpu_number();
-       for (cpu=0; cpu <= max_cpu; cpu++) {
+       for (cpu = 0; cpu < max_cpu; cpu++) {
                target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
 
-               if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
+               if (target_cpu_datap == cpu_data_ptr) {
                        continue;
+               }
 
-               if(KERN_SUCCESS != cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, parm)) {
+               if ((target_cpu_datap == NULL) ||
+                   KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
                        failsig++;
                }
        }
@@ -238,95 +289,183 @@ cpu_broadcast_xcall(uint32_t *synch,
        (void) ml_set_interrupts_enabled(intr);
 
        if (synch != NULL) {
-               if (hw_atomic_sub(synch, (!self_xcall)? failsig+1 : failsig) == 0)
+               if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
                        clear_wait(current_thread(), THREAD_AWAKENED);
-               else
+               } else {
                        thread_block(THREAD_CONTINUE_NULL);
+               }
        }
 
-       if (!self_xcall)
-               return (real_ncpus - failsig - 1);
-       else
-               return (real_ncpus - failsig);
+       if (!self_xcall) {
+               return max_cpu - failsig - 1;
+       } else {
+               return max_cpu - failsig;
+       }
 }
 
-kern_return_t
-cpu_xcall(int cpu_number, broadcastFunc func, void *param)
+unsigned int
+cpu_broadcast_xcall(uint32_t *synch,
+    boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm)
 {
-       cpu_data_t      *target_cpu_datap;
+       return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
+}
+
+struct cpu_broadcast_xcall_simple_data {
+       broadcastFunc func;
+       void* parm;
+       uint32_t sync;
+};
+
+static void
+cpu_broadcast_xcall_simple_cbk(void *parm)
+{
+       struct cpu_broadcast_xcall_simple_data *data = (struct cpu_broadcast_xcall_simple_data*)parm;
+
+       data->func(data->parm);
 
-       if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number()))
+       if (os_atomic_dec(&data->sync, relaxed) == 0) {
+               thread_wakeup((event_t)&data->sync);
+       }
+}
+
+static unsigned int
+cpu_xcall_simple(boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm,
+    bool immediate)
+{
+       struct cpu_broadcast_xcall_simple_data data = {};
+
+       data.func = func;
+       data.parm = parm;
+
+       return cpu_broadcast_xcall_internal(immediate ? SIGPxcallImm : SIGPxcall, &data.sync, self_xcall, cpu_broadcast_xcall_simple_cbk, &data);
+}
+
+unsigned int
+cpu_broadcast_immediate_xcall(uint32_t *synch,
+    boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm)
+{
+       return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
+}
+
+unsigned int
+cpu_broadcast_xcall_simple(boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm)
+{
+       return cpu_xcall_simple(self_xcall, func, parm, false);
+}
+
+unsigned int
+cpu_broadcast_immediate_xcall_simple(boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm)
+{
+       return cpu_xcall_simple(self_xcall, func, parm, true);
+}
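
The *_simple variants wrap the caller's function in cpu_broadcast_xcall_simple_cbk(), which runs it and then decrements a shared counter, waking the initiator when the last CPU finishes. A rough userspace analogue of that rendezvous is sketched below, using a pthread condition variable where the kernel uses assert_wait()/thread_block(); all names are illustrative only:

    /*
     * Userspace model (not the kernel API) of the countdown used by
     * cpu_broadcast_xcall_simple(): every CPU runs the wrapped callback and
     * the last one to finish wakes the initiator.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct xcall_simple {
        void          (*func)(void *);
        void           *parm;
        unsigned        remaining;
        pthread_mutex_t lock;
        pthread_cond_t  done;
    };

    /* Analogue of cpu_broadcast_xcall_simple_cbk(). */
    static void
    xcall_simple_cbk(struct xcall_simple *d)
    {
        d->func(d->parm);                   /* run the caller's function */

        pthread_mutex_lock(&d->lock);
        if (--d->remaining == 0) {
            pthread_cond_signal(&d->done);  /* last finisher wakes the waiter */
        }
        pthread_mutex_unlock(&d->lock);
    }

    static void *
    worker(void *arg)
    {
        xcall_simple_cbk(arg);
        return NULL;
    }

    static void
    bump(void *p)
    {
        atomic_fetch_add_explicit((_Atomic int *)p, 1, memory_order_relaxed);
    }

    int
    main(void)
    {
        enum { NCPUS = 4 };
        static _Atomic int hits;
        pthread_t t[NCPUS];
        struct xcall_simple d = {
            .func = bump, .parm = (void *)&hits, .remaining = NCPUS,
            .lock = PTHREAD_MUTEX_INITIALIZER, .done = PTHREAD_COND_INITIALIZER,
        };

        for (int i = 0; i < NCPUS; i++) {
            pthread_create(&t[i], NULL, worker, &d);
        }

        /* Analogue of the assert_wait()/thread_block() rendezvous. */
        pthread_mutex_lock(&d.lock);
        while (d.remaining != 0) {
            pthread_cond_wait(&d.done, &d.lock);
        }
        pthread_mutex_unlock(&d.lock);

        for (int i = 0; i < NCPUS; i++) {
            pthread_join(t[i], NULL);
        }
        printf("callback ran on %d CPUs\n", atomic_load(&hits));
        return 0;
    }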
+
+static kern_return_t
+cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
+{
+       cpu_data_t      *target_cpu_datap;
+
+       if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
                return KERN_INVALID_ARGUMENT;
+       }
+
+       if (func == NULL || param == NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;              
-       if (target_cpu_datap == NULL)
+       target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
+       if (target_cpu_datap == NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       return cpu_signal(target_cpu_datap, SIGPxcall, (void*)func, param);
+       return cpu_signal(target_cpu_datap, signal, (void*)func, param);
+}
+
+kern_return_t
+cpu_xcall(int cpu_number, broadcastFunc func, void *param)
+{
+       return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
+}
+
+kern_return_t
+cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
+{
+       return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
 }
 
 static kern_return_t
 cpu_signal_internal(cpu_data_t *target_proc,
-                   unsigned int signal,
-                   void *p0,
-                   void *p1,
-                   boolean_t defer)
+    unsigned int signal,
+    void *p0,
+    void *p1,
+    boolean_t defer)
 {
-       unsigned int    Check_SIGPdisabled;
-       int             current_signals;
-       Boolean         swap_success;
-       boolean_t       interruptible = ml_set_interrupts_enabled(FALSE);
-       cpu_data_t      *current_proc = getCpuDatap();
+       unsigned int    Check_SIGPdisabled;
+       int             current_signals;
+       Boolean         swap_success;
+       boolean_t       interruptible = ml_set_interrupts_enabled(FALSE);
+       cpu_data_t      *current_proc = getCpuDatap();
 
        /* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
        if (defer) {
                assert(signal == SIGPnop);
        }
 
-       if (current_proc != target_proc)
+       if (current_proc != target_proc) {
                Check_SIGPdisabled = SIGPdisabled;
-       else
+       } else {
                Check_SIGPdisabled = 0;
+       }
 
-       if (signal == SIGPxcall) {
+       if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
                do {
                        current_signals = target_proc->cpu_signal;
                        if ((current_signals & SIGPdisabled) == SIGPdisabled) {
-#if DEBUG || DEVELOPMENT
-                               target_proc->failed_signal = SIGPxcall;
-                               target_proc->failed_xcall = p0;
-                               OSIncrementAtomicLong(&target_proc->failed_signal_count);
-#endif
                                ml_set_interrupts_enabled(interruptible);
                                return KERN_FAILURE;
                        }
-                       swap_success = OSCompareAndSwap(current_signals & (~SIGPxcall), current_signals | SIGPxcall,
-                                       &target_proc->cpu_signal);
+                       swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
+                           &target_proc->cpu_signal);
+
+                       if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
+                               ml_set_interrupts_enabled(interruptible);
+                               return KERN_ALREADY_WAITING;
+                       }
 
                        /* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
                         * be trying to xcall us.  Since we have interrupts disabled that can deadlock,
                         * so break the deadlock by draining pending xcalls. */
-                       if (!swap_success && (current_proc->cpu_signal & SIGPxcall))
+                       if (!swap_success && (current_proc->cpu_signal & signal)) {
                                cpu_handle_xcall(current_proc);
-
+                       }
                } while (!swap_success);
 
-               target_proc->cpu_xcall_p0 = p0;
-               target_proc->cpu_xcall_p1 = p1;
+               if (signal == SIGPxcallImm) {
+                       target_proc->cpu_imm_xcall_p0 = p0;
+                       target_proc->cpu_imm_xcall_p1 = p1;
+               } else {
+                       target_proc->cpu_xcall_p0 = p0;
+                       target_proc->cpu_xcall_p1 = p1;
+               }
        } else {
                do {
                        current_signals = target_proc->cpu_signal;
-                       if ((Check_SIGPdisabled !=0 ) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
-#if DEBUG || DEVELOPMENT
-                               target_proc->failed_signal = signal;
-                               OSIncrementAtomicLong(&target_proc->failed_signal_count);
-#endif
+                       if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
                                ml_set_interrupts_enabled(interruptible);
                                return KERN_FAILURE;
                        }
 
                        swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
-                                       &target_proc->cpu_signal);
+                           &target_proc->cpu_signal);
                } while (!swap_success);
        }
 
@@ -340,21 +479,37 @@ cpu_signal_internal(cpu_data_t *target_proc,
 
        if (!(target_proc->cpu_signal & SIGPdisabled)) {
                if (defer) {
+#if defined(HAS_IPI)
+                       if (gFastIPI) {
+                               ml_cpu_signal_deferred(target_proc->cpu_phys_id);
+                       } else {
+                               PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
+                       }
+#else
                        PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
+#endif /* defined(HAS_IPI) */
                } else {
+#if defined(HAS_IPI)
+                       if (gFastIPI) {
+                               ml_cpu_signal(target_proc->cpu_phys_id);
+                       } else {
+                               PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
+                       }
+#else
                        PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
+#endif /* defined(HAS_IPI) */
                }
        }
 
        ml_set_interrupts_enabled(interruptible);
-       return (KERN_SUCCESS);
+       return KERN_SUCCESS;
 }
 
 kern_return_t
 cpu_signal(cpu_data_t *target_proc,
-          unsigned int signal,
-          void *p0,
-          void *p1)
+    unsigned int signal,
+    void *p0,
+    void *p1)
 {
        return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
 }
@@ -370,7 +525,15 @@ cpu_signal_cancel(cpu_data_t *target_proc)
 {
        /* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
        if (!(target_proc->cpu_signal & SIGPdisabled)) {
+#if defined(HAS_IPI)
+               if (gFastIPI) {
+                       ml_cpu_signal_retract(target_proc->cpu_phys_id);
+               } else {
+                       PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
+               }
+#else
                PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
+#endif /* defined(HAS_IPI) */
        }
 }
 
@@ -384,210 +547,251 @@ void
 cpu_signal_handler_internal(boolean_t disable_signal)
 {
        cpu_data_t     *cpu_data_ptr = getCpuDatap();
-       unsigned int    cpu_signal;
-
+       unsigned int    cpu_signal;
 
        cpu_data_ptr->cpu_stat.ipi_cnt++;
        cpu_data_ptr->cpu_stat.ipi_cnt_wake++;
+       SCHED_STATS_INC(ipi_count);
 
-       SCHED_STATS_IPI(current_processor());
-
-       cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);
+       cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
 
-       if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE))
-               (void)hw_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled);
-       else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE))
-               (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdisabled);
+       if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
+               os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
+       } else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
+               os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
+       }
 
        while (cpu_signal & ~SIGPdisabled) {
                if (cpu_signal & SIGPdec) {
-                       (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdec);
+                       os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
+                       INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_IPI);
                        rtclock_intr(FALSE);
+                       INTERRUPT_MASKED_DEBUG_END();
                }
 #if KPERF
-               if (cpu_signal & SIGPkptimer) {
-                       (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPkptimer);
-                       kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
+               if (cpu_signal & SIGPkppet) {
+                       os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed);
+                       extern void kperf_signal_handler(void);
+                       INTERRUPT_MASKED_DEBUG_START(kperf_signal_handler, DBG_INTR_TYPE_IPI);
+                       kperf_signal_handler();
+                       INTERRUPT_MASKED_DEBUG_END();
                }
-#endif
-               if (cpu_signal & SIGPxcall) {
+#endif /* KPERF */
+               if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
                        cpu_handle_xcall(cpu_data_ptr);
                }
                if (cpu_signal & SIGPast) {
-                       (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPast);
-                       ast_check(cpu_data_ptr->cpu_processor);
+                       os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
+                       INTERRUPT_MASKED_DEBUG_START(ast_check, DBG_INTR_TYPE_IPI);
+                       ast_check(current_processor());
+                       INTERRUPT_MASKED_DEBUG_END();
                }
                if (cpu_signal & SIGPdebug) {
-                       (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdebug);
+                       os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
+                       INTERRUPT_MASKED_DEBUG_START(DebuggerXCall, DBG_INTR_TYPE_IPI);
                        DebuggerXCall(cpu_data_ptr->cpu_int_state);
+                       INTERRUPT_MASKED_DEBUG_END();
                }
-#if    __ARM_SMP__ && defined(ARMA7)
+#if     defined(ARMA7)
                if (cpu_signal & SIGPLWFlush) {
-                       (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWFlush);
+                       os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
+                       INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
                        cache_xcall_handler(LWFlush);
+                       INTERRUPT_MASKED_DEBUG_END();
                }
                if (cpu_signal & SIGPLWClean) {
-                       (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWClean);
+                       os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
+                       INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
                        cache_xcall_handler(LWClean);
+                       INTERRUPT_MASKED_DEBUG_END();
                }
 #endif
 
-               cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);
+               cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
        }
 }
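
The handler loop follows one pattern for every signal: atomically clear the bit before running its handler, then re-read the mask at the bottom of the loop so a signal raised while a handler was running is not lost. A standalone sketch of that dispatch pattern (made-up signal names and handlers, not the kernel's):

    /*
     * Standalone model of the dispatch loop in cpu_signal_handler_internal():
     * clear each pending bit before handling it, then re-read the mask so a
     * signal re-raised during a handler gets picked up on the next pass.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    #define SIGPdec   0x1u
    #define SIGPast   0x2u
    #define SIGPdebug 0x4u

    static _Atomic unsigned pending;

    static void handle_dec(void)   { printf("timer\n"); }
    static void handle_ast(void)   { printf("ast\n"); }
    static void handle_debug(void) { printf("debugger\n"); }

    static void
    signal_handler(void)
    {
        unsigned sig = atomic_load_explicit(&pending, memory_order_relaxed);

        while (sig != 0) {
            if (sig & SIGPdec) {
                atomic_fetch_and_explicit(&pending, ~SIGPdec, memory_order_relaxed);
                handle_dec();
            }
            if (sig & SIGPast) {
                atomic_fetch_and_explicit(&pending, ~SIGPast, memory_order_relaxed);
                handle_ast();
            }
            if (sig & SIGPdebug) {
                atomic_fetch_and_explicit(&pending, ~SIGPdebug, memory_order_relaxed);
                handle_debug();
            }
            /* Pick up anything raised while the handlers above were running. */
            sig = atomic_load_explicit(&pending, memory_order_relaxed);
        }
    }

    int
    main(void)
    {
        atomic_fetch_or_explicit(&pending, SIGPdec | SIGPast, memory_order_relaxed);
        signal_handler();
        return 0;
    }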
 
 void
-cpu_exit_wait(int cpu)
-{
-       if ( cpu != master_cpu) {
-               cpu_data_t      *cpu_data_ptr;
+cpu_exit_wait(int cpu_id)
+{
+#if USE_APPLEARMSMP
+       if (!ml_is_quiescing()) {
+               // For runtime disable (non S2R) the CPU will shut down immediately.
+               ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
+               assert(cpu && cpu->cpu_IMPL_regs);
+               volatile uint64_t *cpu_sts = (void *)(cpu->cpu_IMPL_regs + CPU_PIO_CPU_STS_OFFSET);
+
+               // Poll the "CPU running state" field until it is 0 (off)
+               while ((*cpu_sts & CPU_PIO_CPU_STS_cpuRunSt_mask) != 0x00) {
+                       __builtin_arm_dsb(DSB_ISH);
+               }
+               return;
+       }
+#endif /* USE_APPLEARMSMP */
 
-               cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
-               while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {};
+       if (cpu_id != master_cpu) {
+               // For S2R, ml_arm_sleep() will do some extra polling after setting ARM_CPU_ON_SLEEP_PATH.
+               cpu_data_t      *cpu_data_ptr;
+
+               cpu_data_ptr = CpuDataEntries[cpu_id].cpu_data_vaddr;
+               while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
+               }
+               ;
        }
 }
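
On the runtime-disable path (USE_APPLEARMSMP, not quiescing) the wait is a spin-poll of the core's implementation-defined status register until its run-state field reads zero. A simplified userspace stand-in for that kind of wait is below; it polls an atomic flag instead of an MMIO register, so no DSB is needed:

    /*
     * Userspace stand-in for the cpu_exit_wait() poll: spin until the "run
     * state" field of a status word reads 0, re-loading it on every pass.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define CPU_STS_RUN_MASK 0xfu

    static _Atomic unsigned cpu_sts = 0x1;   /* core starts out "running" */

    static void *
    dying_cpu(void *arg)
    {
        (void)arg;
        /* ... shutdown work would happen here ... */
        atomic_store_explicit(&cpu_sts, 0x0, memory_order_release);
        return NULL;
    }

    int
    main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, dying_cpu, NULL);

        /* Analogue of the poll loop; the kernel separates loads with dsb ish. */
        while ((atomic_load_explicit(&cpu_sts, memory_order_acquire)
            & CPU_STS_RUN_MASK) != 0) {
            /* spin */
        }
        printf("cpu reported off\n");

        pthread_join(t, NULL);
        return 0;
    }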
 
+boolean_t
+cpu_can_exit(__unused int cpu)
+{
+       return TRUE;
+}
+
 void
 cpu_machine_init(void)
 {
        static boolean_t started = FALSE;
-       cpu_data_t      *cpu_data_ptr;
+       cpu_data_t      *cpu_data_ptr;
 
        cpu_data_ptr = getCpuDatap();
        started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
-       if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
+       if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
                platform_cache_init();
+       }
+
+       /* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
        PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);
+
        cpu_data_ptr->cpu_flags |= StartedState;
        ml_init_interrupt();
 }
 
-processor_t
-cpu_processor_alloc(boolean_t is_boot_cpu)
-{
-       processor_t proc;
-
-       if (is_boot_cpu)
-               return &BootProcessor;
-
-       proc = kalloc(sizeof(*proc));
-       if (!proc)
-               return NULL;
-
-       bzero((void *) proc, sizeof(*proc));
-       return proc;
-}
-
-void
-cpu_processor_free(processor_t proc)
-{
-       if (proc != NULL && proc != &BootProcessor)
-               kfree((void *) proc, sizeof(*proc));
-}
-
 processor_t
 current_processor(void)
 {
-       return getCpuDatap()->cpu_processor;
+       return PERCPU_GET(processor);
 }
 
 processor_t
 cpu_to_processor(int cpu)
 {
        cpu_data_t *cpu_data = cpu_datap(cpu);
-       if (cpu_data != NULL)
-               return cpu_data->cpu_processor;
-       else
+       if (cpu_data != NULL) {
+               return PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data);
+       } else {
                return NULL;
+       }
 }
 
 cpu_data_t *
 processor_to_cpu_datap(processor_t processor)
 {
-       cpu_data_t *target_cpu_datap;
-
-       assert(processor->cpu_id < MAX_CPUS);
+       assert(processor->cpu_id <= ml_get_max_cpu_number());
        assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);
 
-       target_cpu_datap = (cpu_data_t*)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
-       assert(target_cpu_datap->cpu_processor == processor);
+       return PERCPU_GET_RELATIVE(cpu_data, processor, processor);
+}
+
+__startup_func
+static void
+cpu_data_startup_init(void)
+{
+       vm_size_t size = percpu_section_size() * (ml_get_cpu_count() - 1);
+
+       percpu_base.size = percpu_section_size();
+       if (ml_get_cpu_count() == 1) {
+               percpu_base.start = VM_MAX_KERNEL_ADDRESS;
+               return;
+       }
+
+       /*
+        * The memory needs to be physically contiguous because it contains
+        * cpu_data_t structures sometimes accessed during reset
+        * with the MMU off.
+        *
+        * kmem_alloc_contig() can't be used early, at the time STARTUP_SUB_PERCPU
+        * normally runs, so we instead steal the memory for the PERCPU subsystem
+        * even earlier.
+        */
+       percpu_base.start  = (vm_offset_t)pmap_steal_memory(round_page(size));
+       bzero((void *)percpu_base.start, round_page(size));
 
-       return target_cpu_datap;
+       percpu_base.start -= percpu_section_start();
+       percpu_base.end    = percpu_base.start + size - 1;
+       percpu_base_cur    = percpu_base.start;
 }
+STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, cpu_data_startup_init);
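
cpu_data_startup_init() steals one physically contiguous slab large enough for a copy of the per-CPU section per secondary CPU, then biases percpu_base.start by the section's own start address so that adding a CPU's base to a per-CPU variable's address lands in that CPU's copy. A simplified model of that arithmetic follows; it does not use the real PERCPU_* macros, and every name in it is made up for illustration:

    /*
     * Simplified model of the per-CPU base arithmetic: base = slab address
     * minus the master section's address, so "base + &variable" lands at the
     * same offset inside a secondary CPU's copy of the section.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NCPUS 4

    /* Stand-in for the master per-CPU section the linker emits (cpu 0's copy). */
    struct percpu_vars {
        uint64_t cpu_number;
        uint64_t ipi_cnt;
    };
    static struct percpu_vars master_copy;

    /* Analogue of percpu_base.start after the subtraction in the diff. */
    static uintptr_t percpu_start;

    static void
    percpu_model_init(void)
    {
        size_t size = sizeof(struct percpu_vars) * (NCPUS - 1);
        void  *slab = calloc(1, size);          /* pmap_steal_memory() stand-in */

        percpu_start = (uintptr_t)slab - (uintptr_t)&master_copy;
    }

    /* Analogue of PERCPU_GET_WITH_BASE(): cpu 0 uses the master copy, every
     * secondary cpu k uses the slab slot k - 1 section-sizes past the base. */
    static struct percpu_vars *
    percpu_slot(unsigned cpu)
    {
        if (cpu == 0) {
            return &master_copy;
        }
        uintptr_t base = percpu_start + (cpu - 1) * sizeof(struct percpu_vars);
        return (struct percpu_vars *)(base + (uintptr_t)&master_copy);
    }

    int
    main(void)
    {
        percpu_model_init();

        for (unsigned cpu = 0; cpu < NCPUS; cpu++) {
            percpu_slot(cpu)->cpu_number = cpu;
        }
        printf("cpu 2's slot records cpu_number %llu\n",
            (unsigned long long)percpu_slot(2)->cpu_number);
        return 0;
    }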
 
 cpu_data_t *
 cpu_data_alloc(boolean_t is_boot_cpu)
 {
-       cpu_data_t              *cpu_data_ptr = NULL;
+       cpu_data_t   *cpu_data_ptr = NULL;
+       vm_address_t  base;
 
-       if (is_boot_cpu)
-               cpu_data_ptr = &BootCpuData;
-       else {
-               if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
-                       goto cpu_data_alloc_error;
-
-               bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));
+       if (is_boot_cpu) {
+               cpu_data_ptr = PERCPU_GET_MASTER(cpu_data);
+       } else {
+               base = os_atomic_add_orig(&percpu_base_cur,
+                   percpu_section_size(), relaxed);
 
+               cpu_data_ptr = PERCPU_GET_WITH_BASE(base, cpu_data);
                cpu_stack_alloc(cpu_data_ptr);
        }
 
-       cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
-       if (cpu_data_ptr->cpu_processor == (struct processor *)NULL)
-               goto cpu_data_alloc_error;
-
        return cpu_data_ptr;
-
-cpu_data_alloc_error:
-       panic("cpu_data_alloc() failed\n");
-       return (cpu_data_t *)NULL;
 }
 
 ast_t *
 ast_pending(void)
 {
-       return (&getCpuDatap()->cpu_pending_ast);
+       return &getCpuDatap()->cpu_pending_ast;
 }
 
 cpu_type_t
 slot_type(int slot_num)
 {
-       return (cpu_datap(slot_num)->cpu_type);
+       return cpu_datap(slot_num)->cpu_type;
 }
 
 cpu_subtype_t
 slot_subtype(int slot_num)
 {
-       return (cpu_datap(slot_num)->cpu_subtype);
+       return cpu_datap(slot_num)->cpu_subtype;
 }
 
 cpu_threadtype_t
 slot_threadtype(int slot_num)
 {
-       return (cpu_datap(slot_num)->cpu_threadtype);
+       return cpu_datap(slot_num)->cpu_threadtype;
 }
 
 cpu_type_t
 cpu_type(void)
 {
-       return (getCpuDatap()->cpu_type);
+       return getCpuDatap()->cpu_type;
 }
 
 cpu_subtype_t
 cpu_subtype(void)
 {
-       return (getCpuDatap()->cpu_subtype);
+       return getCpuDatap()->cpu_subtype;
 }
 
 cpu_threadtype_t
 cpu_threadtype(void)
 {
-       return (getCpuDatap()->cpu_threadtype);
+       return getCpuDatap()->cpu_threadtype;
 }
 
 int
 cpu_number(void)
 {
-       return (getCpuDatap()->cpu_number);
+       return getCpuDatap()->cpu_number;
+}
+
+vm_offset_t
+current_percpu_base(void)
+{
+       return current_thread()->machine.pcpu_data_base;
 }
 
 uint64_t
@@ -596,3 +800,88 @@ ml_get_wake_timebase(void)
        return wake_abstime;
 }
 
+bool
+ml_cpu_signal_is_enabled(void)
+{
+       return !(getCpuDatap()->cpu_signal & SIGPdisabled);
+}
+
+bool
+ml_cpu_can_exit(__unused int cpu_id)
+{
+       /* processor_exit() is always allowed on the S2R path */
+       if (ml_is_quiescing()) {
+               return true;
+       }
+#if HAS_CLUSTER && USE_APPLEARMSMP
+       /*
+        * Cyprus and newer chips can disable individual non-boot CPUs. The
+        * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips.
+        */
+       if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) {
+               return true;
+       }
+#endif
+       return false;
+}
+
+void
+ml_cpu_init_state(void)
+{
+       lck_grp_init(&cpu_lck_grp, "cpu_lck_grp", LCK_GRP_ATTR_NULL);
+       lck_rw_init(&cpu_state_lock, &cpu_lck_grp, LCK_ATTR_NULL);
+}
+
+#ifdef USE_APPLEARMSMP
+
+void
+ml_cpu_begin_state_transition(int cpu_id)
+{
+       lck_rw_lock_exclusive(&cpu_state_lock);
+       CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true;
+       lck_rw_unlock_exclusive(&cpu_state_lock);
+}
+
+void
+ml_cpu_end_state_transition(int cpu_id)
+{
+       lck_rw_lock_exclusive(&cpu_state_lock);
+       CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false;
+       lck_rw_unlock_exclusive(&cpu_state_lock);
+}
+
+void
+ml_cpu_begin_loop(void)
+{
+       lck_rw_lock_shared(&cpu_state_lock);
+}
+
+void
+ml_cpu_end_loop(void)
+{
+       lck_rw_unlock_shared(&cpu_state_lock);
+}
+
+#else /* USE_APPLEARMSMP */
+
+void
+ml_cpu_begin_state_transition(__unused int cpu_id)
+{
+}
+
+void
+ml_cpu_end_state_transition(__unused int cpu_id)
+{
+}
+
+void
+ml_cpu_begin_loop(void)
+{
+}
+
+void
+ml_cpu_end_loop(void)
+{
+}
+
+#endif /* USE_APPLEARMSMP */
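
The USE_APPLEARMSMP variants implement a plain reader/writer protocol: ml_cpu_begin_state_transition()/ml_cpu_end_state_transition() take cpu_state_lock exclusive while flipping in_state_transition, and ml_cpu_begin_loop()/ml_cpu_end_loop() take it shared around code that walks the CPUs so it never observes a half-completed transition. A hedged userspace sketch of the same protocol with a pthread rwlock (illustrative names, not the kernel implementation):

    /*
     * Userspace sketch of the cpu_state_lock protocol: writers flip a CPU's
     * transition flag under the exclusive lock, readers walk the CPU list
     * under the shared lock.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NCPUS 4

    static pthread_rwlock_t cpu_state_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool in_state_transition[NCPUS];

    static void
    cpu_begin_state_transition(int cpu)
    {
        pthread_rwlock_wrlock(&cpu_state_lock);
        in_state_transition[cpu] = true;
        pthread_rwlock_unlock(&cpu_state_lock);
    }

    static void
    cpu_end_state_transition(int cpu)
    {
        pthread_rwlock_wrlock(&cpu_state_lock);
        in_state_transition[cpu] = false;
        pthread_rwlock_unlock(&cpu_state_lock);
    }

    /* Analogue of an ml_cpu_begin_loop()/ml_cpu_end_loop() section. */
    static int
    count_stable_cpus(void)
    {
        int n = 0;

        pthread_rwlock_rdlock(&cpu_state_lock);
        for (int cpu = 0; cpu < NCPUS; cpu++) {
            if (!in_state_transition[cpu]) {
                n++;
            }
        }
        pthread_rwlock_unlock(&cpu_state_lock);
        return n;
    }

    int
    main(void)
    {
        cpu_begin_state_transition(2);
        printf("stable cpus: %d\n", count_stable_cpus());
        cpu_end_state_transition(2);
        printf("stable cpus: %d\n", count_stable_cpus());
        return 0;
    }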